 /*****************************************************************************/
/*****************************************************************************/
/* fake-linux.h                                                              */
/*                                                                           */
/* Private declarations and fakery to get the Linux TCP stack to build       */
/* outside the kernel tree.                                                  */
/*                                                                           */
/* When structures defined here are modified, THE CORRESPONDING u_xxx        */
/* STRUCTURE in ipv4.h MUST ALSO BE MODIFIED!!!!!                            */
/*                                                                           */
/* Copyright (c) 1999, K A Fraser                                            */
/*****************************************************************************/
/*****************************************************************************/
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#ifndef __USER_TCP_PRIVATE_H
#define __USER_TCP_PRIVATE_H

// #include <math.h> 

#include "defines.h"

#define __KERNEL__
#ifdef USE_PTH
#include <asm/atomic.h>    // XXXX -- probably fake these to NOPs
#else
#include <./ipv4/atomic.h>
// #include </users/manpreet/work/ibm-summer-03/ultcp/daytona/user-tcp/ipv4/atomic.h>
#endif 

#include <linux/errno.h>   // SAFE -- just a bunch of #define's
#include <linux/types.h>   // SAFE -- basic types and #define's only
#include <asm/system.h>    // XXXX -- probably need to fake some of this out
#include <asm/byteorder.h> // SAFE -- only use this to work out endianess
#include <asm/page.h>      // SAFE -- PAGE_xxx
#include <af_user.h>       // SAFE -- only macros


#ifndef USE_PTH
#include "pth_defs.h" 

#define CONFIG_HZ 100

#define ERESTARTSYS ERESTART
#define ENOIOCTLCMD EOPNOTSUPP 
#define KERN_EMERG      "<0>"   /* system is unusable                   */
#define KERN_ALERT      "<1>"   /* action must be taken immediately     */
#define KERN_CRIT       "<2>"   /* critical conditions                  */
#define KERN_ERR        "<3>"   /* error conditions                     */
#define KERN_WARNING    "<4>"   /* warning conditions                   */
#define KERN_NOTICE     "<5>"   /* normal but significant condition     */
#define KERN_INFO       "<6>"   /* informational                        */
#define KERN_DEBUG      "<7>"   /* debug-level messages                 */


#endif


// make it clear we DON'T want these!
#undef CONFIG_FILTER
#undef CONFIG_NETFILTER
#undef CONFIG_SYN_COOKIES
#undef CONFIG_IP_ROUTE_NAT
#undef CONFIG_IP_MULTICAST
#undef CONFIG_IP_MROUTE
#undef CONFIG_IP_ROUTE_MULTIPATH
#undef CONFIG_IP_ROUTE_VERBOSE
#undef CONFIG_TCP_TW_RECYCLE
#undef CONFIG_IPV6
#undef CONFIG_IPV6_MODULE
#undef CONFIG_PROC_FS
#undef CONFIG_RTNETLINK
#undef CONFIG_SYSCTL
#undef CONFIG_NET_CLS_ROUTE
#undef CONFIG_PROC_FS 

#include <stdio.h>

// referenced before defn.
struct rtable;            struct sockaddr; struct msghdr;
struct poll_table_struct; struct user_pcb; struct file;
extern int errno;
char *strerror(int errno);
#define __init 
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)

#ifndef NDEBUG
/*
 * Debug assertion: print the failing expression and crash via panic().
 * Wrapped in do { } while (0) so that `ASSERT(p);` is a single statement;
 * the original bare `if` mis-paired with a following `else` at call sites
 * (classic multi-statement-macro hazard).
 */
#define ASSERT(p) do { if (!(p)) { printf("%s\n", #p); panic("Assertion failed"); } } while (0)
#else
/* Release build: expands to a no-op expression that still needs a ';'. */
#define ASSERT(p) ((void)0)
#endif


//THIS ENABLES FULL COUPLING OF THE FLOWS; with coupled decreases too; a version with uncoupled decreases but doubly coupled increases must be implemented too
#define COUPLE_ALL 0
//B is the exponent of the multiplicative decrease value, 1/2^B; B=1 corresponds to halving the window, B=2 corresponds to reducing by a quarter
#define B 1

//A is the gain in the increase phase; how much does the aggregate increase per RTT? 
#define A 1

#define COUPLE_INC 1

/*****************************************************************************/
/* THREADS (pthreads or pth threads) -- XXX pth threads only at the moment!  */
/*****************************************************************************/
#define _BIT(n) (1<<(n))

#define MAX_INTERFACE 15

typedef struct pth_ringnode_st pth_ringnode_t;
struct pth_ringnode_st {
    pth_ringnode_t *rn_next;
    pth_ringnode_t *rn_prev;
};

typedef void *pth_t;

typedef struct pth_mutex_st {
    pth_ringnode_t mx_node;
    int            mx_state;
    pth_t          mx_owner;
    unsigned long  mx_count;
} pth_mutex_t;
#define PTH_MUTEX_INITIALIZED _BIT(0)
#define PTH_MUTEX_LOCKED      _BIT(1)
#define PTH_MUTEX_INIT        { {NULL, NULL}, PTH_MUTEX_INITIALIZED, NULL, 0 }

typedef struct pth_rwlock_st {
    int            rw_state;
    unsigned int   rw_mode;
    unsigned long  rw_readers;
    pth_mutex_t    rw_mutex_rd;
    pth_mutex_t    rw_mutex_rw;
} pth_rwlock_t;
enum { PTH_RWLOCK_RD, PTH_RWLOCK_RW };
#define PTH_RWLOCK_INITIALIZED _BIT(0)
#define PTH_RWLOCK_INIT        { PTH_RWLOCK_INITIALIZED, PTH_RWLOCK_RD, 0, \
                                 PTH_MUTEX_INIT, PTH_MUTEX_INIT }


//getsockopt stuff
#define SO_SNDBUF     7
#define SO_RCVBUF     8

typedef struct pth_cond_st 
{                                     
    unsigned long cn_state;
} pth_cond_t;
#define PTH_COND_INITIALIZED         _BIT(0)
#define PTH_COND_SIGNALED            _BIT(1)
#define PTH_COND_BROADCAST           _BIT(2)
#define PTH_COND_HANDLED             _BIT(3)
#define PTH_COND_INIT                { PTH_COND_INITIALIZED }

typedef void *pth_event_t;

#ifdef USE_PTH
extern int            pth_mutex_init(pth_mutex_t *);
extern int            pth_mutex_acquire(pth_mutex_t *, int, pth_event_t);
extern int            pth_mutex_destroy(pth_mutex_t *);
extern int            pth_mutex_release(pth_mutex_t *);
extern int            pth_rwlock_init(pth_rwlock_t *);
extern int            pth_rwlock_acquire(pth_rwlock_t *, int, int, pth_event_t);
extern int            pth_rwlock_release(pth_rwlock_t *);
extern int            pth_cond_init(pth_cond_t *);
extern int            pth_cond_await(pth_cond_t *, pth_mutex_t *, pth_event_t);
extern int            pth_cond_notify(pth_cond_t *, int);
#endif 

/*****************************************************************************/
/* SPINLOCKS                                                                 */
/*****************************************************************************/
typedef pth_mutex_t spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) PTH_MUTEX_INIT
#define spin_lock_init(x)           pth_mutex_init(x)
#define spin_lock(x)                (pth_mutex_acquire(x, 0, NULL)/*, printk("locked %s\n",  #x)*/)
#define spin_lock_irqsave(x,f)      spin_lock(x)
#define spin_lock_bh(x)             spin_lock(x)
#define spin_lock_irq(x)            spin_lock(x)
#define spin_unlock(x)              pth_mutex_release(x)
#define spin_unlock_irqrestore(x,f) spin_unlock(x)
#define spin_unlock_bh(x)           spin_unlock(x)
#define spin_unlock_irq(x)          spin_unlock(x)

typedef pth_rwlock_t rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) PTH_RWLOCK_INIT
#define read_lock(rw)	           (pth_rwlock_acquire(rw, PTH_RWLOCK_RD, \
                                                      0, NULL)/*, printk("r %s\n", #rw)*/)
#define read_lock_irq(rw)          read_lock(rw)
#define read_lock_bh(rw)           read_lock(rw)
#define read_unlock(rw)            pth_rwlock_release(rw)
#define read_unlock_irq(rw)        read_unlock(rw)
#define read_unlock_bh(rw)         read_unlock(rw)
#define write_lock(rw)             (pth_rwlock_acquire(rw, PTH_RWLOCK_RW, \
                                                      0, NULL)/*, printk("w %s\n", #rw)*/)
#define write_lock_irq(rw)         write_lock(rw)
#define write_lock_bh(rw)          write_lock(rw)
#define write_unlock(rw)           pth_rwlock_release(rw)
#define write_unlock_irq(rw)       write_unlock(rw)
#define write_unlock_bh(rw)        write_unlock(rw)


/*****************************************************************************/
/* WAIT QUEUES and SCHEDULING                                                */
/* These are to be replaced by wait/notify on condition variables, I think.  */
/*****************************************************************************/
typedef struct {} wait_queue_t;
typedef pth_cond_t wait_queue_head_t;
#define DECLARE_WAITQUEUE(a,b)        struct wait_queue *(a) = 0
#define DECLARE_WAIT_QUEUE_HEAD(a)    wait_queue_head_t (a);
#define init_waitqueue_head(a)        pth_cond_init(a)
#define wake_up(p)                    pth_cond_notify(p, 1)
#define wake_up_interruptible(a)      wake_up(a)

/*****************************************************************************/
/* MEMORY                                                                    */
/* Faked to malloc/free for now. May reimplement caches more efficiently at  */
/* some point.                                                               */
/*****************************************************************************/
void *malloc(int);
void free(void *);
#define kmalloc(size,type) malloc(size) 
#define kfree(p)           free(p)
#define kfree_s(p,s)       free(p)  /* fairly sure this is okay */

typedef struct cache_block_st cache_block_t;
struct cache_block_st { cache_block_t *nb; };
typedef struct free_item_st free_item_t;
struct free_item_st { free_item_t *nf; };
typedef struct { int size; free_item_t *fl; cache_block_t *fb; } kmem_cache_t;

extern kmem_cache_t *__kmem_cache_create(int size);
extern void *__kmem_cache_alloc(kmem_cache_t *cache);
extern void __kmem_cache_free(kmem_cache_t *cache, void *p);
extern void __kmem_cache_destroy(kmem_cache_t *cache);
#define kmem_cache_create(name,sz,off,flags,ctor,dtor) __kmem_cache_create(sz)
#define kmem_cache_alloc(c,f) __kmem_cache_alloc(c)
#define kmem_cache_free(c,p) __kmem_cache_free(c,p)
#define kmem_cache_destroy(c) __kmem_cache_destroy(c)

#define GFP_ATOMIC 0
#define GFP_KERNEL 0
#define SMP_CACHE_BYTES 32

/*****************************************************************************/
/* TIME                                                                      */
/* The TCP counts everything in jiffies, which is faked out to a syscall.    */
/*****************************************************************************/
extern struct { unsigned long volatile *jiffies; struct timeval *tv; } tmaps;
struct timeval { long int tv_sec; long int tv_usec; };
#define gettimeofday(__tv,__tz) (*(__tv)=*tmaps.tv)
#define do_gettimeofday(__tv)   (*(__tv)=*tmaps.tv)
#include <asm/param.h> /* to get HZ */
#define jiffies (*tmaps.jiffies)
#define xtime (*tmaps.tv)

/*****************************************************************************/
/* BOTTOM HALF                                                               */
/* Doesn't even exist in user space. Just fake it.                           */
/*****************************************************************************/
#define local_bh_disable() 1
#define local_bh_enable() 1

/*****************************************************************************/
/* SIGNALS                                                                   */
/*****************************************************************************/
/*
 * used for sending SIGPIPE back to client *only*... not exactly essential we
 * support this, so I'll NOP it for now.
 */
#define send_sig(a,b,c) 1    // sent with no problems, oh yes!
//#define SIGPIPE 0

/*
 * in the kernel, certain services must check if a signal is pending and
 * allow themselves to be interrupted if so. We're not in the kernel, so we 
 * don't do it!
 */
#define signal_pending(a) 0  // XXX never anything pending...

/*
 * Don't think we need this yet, either (at least, certainly not with signals!)
 */
#define sock_wake_async(a,b,c) 0
//#define POLL_IN, POLL_OUT, POLL_HUP, POLL_PRI 0

/*****************************************************************************/
/* CHECKSUMS                                                                 */
/*****************************************************************************/
/*
 * KAF (31/03/00): In the kernel, it is only possible to offload receive
 * checksums. We ought, therefore, to always do transmit checksums.
 */
#if 1
//#ifndef __TEST_NO_CHECKSUMS
// sum + len bytes from buf -> 32-bit csum
// asmlinkage u_int csum_partial(const u_char *buff, int len, u_int sum);
u_int csum_partial(const u_char *buff, int len, u_int sum);

// sum + len bytes from (src->dst) -> 32-bit csum
u_int csum_partial_copy(const char *src, char *dst, int len, int sum);

// now equivalent, as we need never check for faults.
#define csum_partial_copy_nocheck csum_partial_copy

// as csum_partial_copy, but we zero out the error arg.
#define csum_and_copy_from_user(src,dst,len,sum,err) \
    (*(err) = 0, csum_partial_copy(src,dst,len,sum))

/*
 * Fold a 32-bit partial checksum down to 16 bits (x86 inline asm).
 * The low half is shifted into the high half and added to the original
 * high half; `adcl $0xffff` folds the end-around carry back in.  The
 * final one's complement lands in the top 16 bits, hence the >> 16.
 */
static inline unsigned int csum_fold(u_int sum)
{
    /* "0" ties the second input to output operand %0 (starts as the
       high half of sum); %1 is the low half moved into the high bits. */
    __asm__("addl %1, %0 ; adcl $0xffff, %0"
            : "=r" (sum)
            : "r" (sum << 16), "0" (sum & 0xffff0000));
    return (~sum) >> 16;
}
 
/*
 * Accumulate the TCP/UDP pseudo-header (source address, destination
 * address, segment length, protocol) into the 32-bit running checksum,
 * without the final 16-bit fold (x86 inline asm; matches the i386
 * kernel version).  NOTE(review): `len` appears to be expected in host
 * order -- ntohs(len) swaps it before adding, and proto*256 places the
 * protocol in its pseudo-header byte position; confirm against callers.
 */
static inline u_long csum_tcpudp_nofold(
    u_long saddr, u_long daddr, u_short len, u_short proto, u_int sum) 
{
    /* Chain of adcl's propagates carries; final `adcl $0` folds the
       last carry back into the sum. */
    __asm__("addl %1, %0 ; adcl %2, %0 ; adcl %3, %0 ; adcl $0, %0"
	    : "=r" (sum)
	    : "g" (daddr), "g"(saddr), "g"((ntohs(len)<<16)+proto*256), "0"(sum));
    return sum;
}

/*
 * Complete 16-bit TCP/UDP checksum over the IP pseudo-header:
 * accumulate the pseudo-header fields into the 32-bit running sum,
 * then fold the result down to 16 bits.
 */
static inline u_short csum_tcpudp_magic(
    u_long saddr, u_long daddr, u_short len, u_short proto, u_int sum)
{
    u_long pseudo_sum = csum_tcpudp_nofold(saddr, daddr, len, proto, sum);
    return csum_fold(pseudo_sum);
}

/*
 *	This is a version of ip_compute_csum() optimized for IP headers,
 *	which always checksum on 4 octet boundaries.
 *
 *	By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
 *	Arnt Gulbrandsen.
 */
/*
 *	This is a version of ip_compute_csum() optimized for IP headers,
 *	which always checksum on 4 octet boundaries.
 *
 *	By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
 *	Arnt Gulbrandsen.
 *
 *	iph: pointer to the (4-byte aligned) IP header.
 *	ihl: header length in 32-bit words.
 *	Returns the 16-bit one's-complement header checksum.
 *	NOTE(review): the `jbe 2f` path bails out for ihl <= 4 without
 *	folding, so this presumably assumes a legal ihl >= 5 -- confirm.
 */
static inline unsigned short ip_fast_csum(unsigned char * iph,
					  unsigned int ihl) {
    unsigned int sum;

    __asm__ __volatile__(" \
	    movl (%1), %0; \
	    subl $4, %2; \
	    jbe 2f; \
	    addl 4(%1), %0; \
	    adcl 8(%1), %0; \
	    adcl 12(%1), %0; \
1:	    adcl 16(%1), %0; \
	    lea 4(%1), %1; \
	    decl %2; \
	    jne	1b; \
	    adcl $0, %0; \
	    movl %0, %2; \
	    shrl $16, %0; \
	    addw %w2, %w0; \
	    adcl $0, %0; \
	    notl %0; \
2: \
	    "
			 /* Since the input registers which are loaded with iph and ihl
			    are modified, we must also specify them as outputs, or gcc
			    will assume they contain their original values. */
			 : "=r" (sum), "=r" (iph), "=r" (ihl)
			 : "1" (iph), "2" (ihl));
    return(sum);
}

#else

#define csum_partial(b,l,su) 0
#define csum_partial_copy(s,d,l,su) (memcpy(d,s,l), 0)
#define csum_partial_copy_nocheck csum_partial_copy
#define csum_and_copy_from_user(src,dst,len,sum,err) \
    (*(err) = 0, csum_partial_copy(src,dst,len,sum))
#define csum_fold(s) 0
#define csum_tcpudp_nofold(a,b,c,d,e) 0
#define csum_tcpudp_magic(a,b,c,d,e) 0
#define ip_fast_csum(a,b) 0

#endif

/*****************************************************************************/
/* USER SPACE TRANSFERS                                                      */
/* Not an issue, so replaced by simple memcpy().                             */
/*****************************************************************************/
#define get_user(a,b) (((a) = *(b)), 0)
#define put_user(a,b) ((*(b) = (a)), 0)
#define copy_from_user(to,from,len) (memcpy((to),(from),(len)), 0)
#define copy_to_user(to,from,len)   (memcpy((to),(from),(len)), 0)
void *memmove(void *, const void *, size_t);

/*****************************************************************************/
/* IOCTL COMMANDS                                                            */
/* These are the commands supported by tcp_ioctl(). Values taken from Linux, */
/* but have no particular meaning.                                           */
/*****************************************************************************/
#define SIOCATMARK 0x8905
#define TIOCINQ    0x541b
#define TIOCOUTQ   0x5411
#define ip_rt_ioctl(a,b) (-EINVAL)
#define arp_ioctl(a,b) panic("arp_ioctl() not implemented")
#define devinet_ioctl(a,b) panic("devinet_ioctl() not implemented")
#define dev_ioctl(a,b) panic("dev_ioctl() not implemented")

/*****************************************************************************/
/* MISCELLANEOUS STUFF                                                       */
/* Random bits and bobs, some of which will require more thought to work     */
/* properly.                                                                 */
/*****************************************************************************/
int printf(const char *format, ...);
#define printk printf
/*
 * Fatal error: report file/line plus the message, then crash on purpose
 * with a NULL write so a debugger traps right at the failing call site.
 */
#define panic(s)                               \
({                                             \
    printf("%s:%d:"s"\n", __FILE__, __LINE__); \
    *((int*)0) = 0; })
#define neigh_confirm(p) 0

// the following we simply do need to support. Mostly mc stuff.
#define ip_mc_join_group(sk,mreq)  panic("ip_mc_join_group: not supported!")
#define ip_mc_leave_group(sk,mreq) panic("ip_mc_leave_group: not supported!")
#define netif_rx(skb) 0

// valid for Ethernet with no IP tunneling, IPv6, ...
#define MAX_HEADER 48

#define icmp_init(a) 0
#define arp_init() 0
// #define O_NONBLOCK 0
#define O_NONBLOCK 2048 
#define BUG_TRAP(b) 0
#define kill_proc(a,b,c) 0
#define kill_pg(a,b,c) 0

/*
 * At least have some kind of stab at a decent sequence number seed. (KAF)
 *
 * XORs two rand() samples (the second shifted up 10 bits) with the
 * connection 4-tuple (a,b,c,d).  NOT cryptographically secure, unlike
 * the real kernel routine -- acceptable for user-space testing only.
 * Every macro argument is parenthesized so expression arguments bind
 * correctly under the casts; rand() is cast to u32 *before* the shift
 * to avoid shifting a signed int.
 */
int rand(void);
#define secure_tcp_sequence_number(a,b,c,d) \
    ((u32)rand() ^ ((u32)rand() << 10) ^ (u32)(a) ^ (u32)(b) ^ (u32)(c) ^ (u32)(d))


/*
 * No security policy. (KAF)
 */
#define ipsec_sk_policy(a,b) 1

void request_tx_space_upcall(struct user_pcb *pcb);
int free_tx_skbs(struct user_pcb *pcb);


/*****************************************************************************/
/* SYSCALLS                                                                  */
/*****************************************************************************/
int socket(int, int, int);
int bind(int, struct sockaddr *, int);
int connect(int, struct sockaddr *, int);
int sendmsg(int, const struct msghdr *, unsigned int);


/*****************************************************************************/
/*** XXX -- from linux/rtnetlink.h */
/* RTM_METRICS --- array of struct rtattr with types of RTAX_* */
enum
{
    RTAX_UNSPEC,
#define RTAX_UNSPEC RTAX_UNSPEC
    RTAX_LOCK,
#define RTAX_LOCK RTAX_LOCK
    RTAX_MTU,
#define RTAX_MTU RTAX_MTU
    RTAX_WINDOW,
#define RTAX_WINDOW RTAX_WINDOW
    RTAX_RTT,
#define RTAX_RTT RTAX_RTT
    RTAX_RTTVAR,
#define RTAX_RTTVAR RTAX_RTTVAR
    RTAX_SSTHRESH,
#define RTAX_SSTHRESH RTAX_SSTHRESH
    RTAX_CWND,
#define RTAX_CWND RTAX_CWND
    RTAX_ADVMSS,
#define RTAX_ADVMSS RTAX_ADVMSS
};
#define RTAX_MAX RTAX_ADVMSS
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from linux/iovec.h */
struct iovec
{
    void         *iov_base;
    unsigned int  iov_len;
};
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from net/snmp.h */
struct ip_mib
{
    unsigned long	IpForwarding;
    unsigned long	IpDefaultTTL;
    unsigned long	IpInReceives;
    unsigned long	IpInHdrErrors;
    unsigned long	IpInAddrErrors;
    unsigned long	IpForwDatagrams;
    unsigned long	IpInUnknownProtos;
    unsigned long	IpInDiscards;
    unsigned long	IpInDelivers;
    unsigned long	IpOutRequests;
    unsigned long	IpOutDiscards;
    unsigned long	IpOutNoRoutes;
    unsigned long	IpReasmTimeout;
    unsigned long	IpReasmReqds;
    unsigned long	IpReasmOKs;
    unsigned long	IpReasmFails;
    unsigned long	IpFragOKs;
    unsigned long	IpFragFails;
    unsigned long	IpFragCreates;
};
 
struct ipv6_mib
{
    unsigned long	Ip6InReceives;
    unsigned long	Ip6InHdrErrors;
    unsigned long	Ip6InTooBigErrors;
    unsigned long	Ip6InNoRoutes;
    unsigned long	Ip6InAddrErrors;
    unsigned long	Ip6InUnknownProtos;
    unsigned long	Ip6InTruncatedPkts;
    unsigned long	Ip6InDiscards;
    unsigned long	Ip6InDelivers;
    unsigned long	Ip6OutForwDatagrams;
    unsigned long	Ip6OutRequests;
    unsigned long	Ip6OutDiscards;
    unsigned long	Ip6OutNoRoutes;
    unsigned long	Ip6ReasmTimeout;
    unsigned long	Ip6ReasmReqds;
    unsigned long	Ip6ReasmOKs;
    unsigned long	Ip6ReasmFails;
    unsigned long	Ip6FragOKs;
    unsigned long	Ip6FragFails;
    unsigned long	Ip6FragCreates;
    unsigned long	Ip6InMcastPkts;
    unsigned long	Ip6OutMcastPkts;
};
 
struct icmp_mib
{
    unsigned long	IcmpInMsgs;
    unsigned long	IcmpInErrors;
    unsigned long	IcmpInDestUnreachs;
    unsigned long	IcmpInTimeExcds;
    unsigned long	IcmpInParmProbs;
    unsigned long	IcmpInSrcQuenchs;
    unsigned long	IcmpInRedirects;
    unsigned long	IcmpInEchos;
    unsigned long	IcmpInEchoReps;
    unsigned long	IcmpInTimestamps;
    unsigned long	IcmpInTimestampReps;
    unsigned long	IcmpInAddrMasks;
    unsigned long	IcmpInAddrMaskReps;
    unsigned long	IcmpOutMsgs;
    unsigned long	IcmpOutErrors;
    unsigned long	IcmpOutDestUnreachs;
    unsigned long	IcmpOutTimeExcds;
    unsigned long	IcmpOutParmProbs;
    unsigned long	IcmpOutSrcQuenchs;
    unsigned long	IcmpOutRedirects;
    unsigned long	IcmpOutEchos;
    unsigned long	IcmpOutEchoReps;
    unsigned long	IcmpOutTimestamps;
    unsigned long	IcmpOutTimestampReps;
    unsigned long	IcmpOutAddrMasks;
    unsigned long	IcmpOutAddrMaskReps;
};

struct icmpv6_mib
{
    unsigned long	Icmp6InMsgs;
    unsigned long	Icmp6InErrors;

    unsigned long	Icmp6InDestUnreachs;
    unsigned long	Icmp6InPktTooBigs;
    unsigned long	Icmp6InTimeExcds;
    unsigned long	Icmp6InParmProblems;

    unsigned long	Icmp6InEchos;
    unsigned long	Icmp6InEchoReplies;
    unsigned long	Icmp6InGroupMembQueries;
    unsigned long	Icmp6InGroupMembResponses;
    unsigned long	Icmp6InGroupMembReductions;
    unsigned long	Icmp6InRouterSolicits;
    unsigned long	Icmp6InRouterAdvertisements;
    unsigned long	Icmp6InNeighborSolicits;
    unsigned long	Icmp6InNeighborAdvertisements;
    unsigned long	Icmp6InRedirects;

    unsigned long	Icmp6OutMsgs;

    unsigned long	Icmp6OutDestUnreachs;
    unsigned long	Icmp6OutPktTooBigs;
    unsigned long	Icmp6OutTimeExcds;
    unsigned long	Icmp6OutParmProblems;

    unsigned long	Icmp6OutEchoReplies;
    unsigned long	Icmp6OutRouterSolicits;
    unsigned long	Icmp6OutNeighborSolicits;
    unsigned long	Icmp6OutNeighborAdvertisements;
    unsigned long	Icmp6OutRedirects;
    unsigned long	Icmp6OutGroupMembResponses;
    unsigned long	Icmp6OutGroupMembReductions;
};
 
struct tcp_mib
{
    unsigned long	TcpRtoAlgorithm;
    unsigned long	TcpRtoMin;
    unsigned long	TcpRtoMax;
    unsigned long	TcpMaxConn;
    unsigned long	TcpActiveOpens;
    unsigned long	TcpPassiveOpens;
    unsigned long	TcpAttemptFails;
    unsigned long	TcpEstabResets;
    unsigned long	TcpCurrEstab;
    unsigned long	TcpInSegs;
    unsigned long	TcpOutSegs;
    unsigned long	TcpRetransSegs;
    unsigned long	TcpInErrs;
    unsigned long	TcpOutRsts;
};
 
struct udp_mib
{
    unsigned long	UdpInDatagrams;
    unsigned long	UdpNoPorts;
    unsigned long	UdpInErrors;
    unsigned long	UdpOutDatagrams;
};

struct linux_mib 
{
    unsigned long	SyncookiesSent;
    unsigned long	SyncookiesRecv;
    unsigned long	SyncookiesFailed;
    unsigned long	EmbryonicRsts;
    unsigned long	PruneCalled; 
    unsigned long	RcvPruned;
    unsigned long	OfoPruned;
    unsigned long	OutOfWindowIcmps; 
    unsigned long	LockDroppedIcmps; 
};

extern struct ip_mib    ip_statistics;
extern struct linux_mib net_statistics;
extern struct tcp_mib   tcp_statistics;

/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from linux/timer.h */
/*
 * Minimal stand-in for the kernel's struct timer_list (linux/timer.h),
 * backed by the user-space timer code declared below.
 */
struct timer_list {
    int  index;          /* slot in the timer table; init_timer() sets -1,
                            presumably meaning "not queued" -- see add_timer()/del_timer() */
    unsigned long expires;              /* expiry time, compared against jiffies */
    unsigned long data;                 /* opaque argument handed to function() */
    void (*function)(unsigned long);    /* callback invoked on expiry */
};

int init_timers(void);     // KAF NEW
void cleanup_timers(void); // KAF NEW
extern void add_timer(struct timer_list * timer);
extern int  del_timer(struct timer_list * timer);
void mod_timer(struct timer_list *timer, unsigned long expires);

/*
 * Prepare a timer for use: reset its table slot to the "not queued"
 * sentinel before the first add_timer()/mod_timer() call.
 */
extern inline void init_timer(struct timer_list * timer)
{
    const int not_queued = -1;   /* presumably "not in the timer table" -- confirm vs timer code */
    timer->index = not_queued;
}

#define time_after(a,b)		((long)(b) - (long)(a) < 0)
#define time_before(a,b)	time_after(b,a)
/*****************************************************************************/


/*****************************************************************************/
/*** XXX KAF -- from linux/tcp.h */
/*
 * TCP header (RFC 793 layout).  Multi-byte fields carry network byte
 * order on the wire; the flag bit-fields are declared in opposite
 * order depending on the host's bit-field endianness.
 */
struct tcphdr {
    __u16	source;    /* source port */
    __u16	dest;      /* destination port */
    __u32	seq;       /* sequence number */
    __u32	ack_seq;   /* acknowledgement number */
#if defined(__LITTLE_ENDIAN_BITFIELD)
    __u16	res1:4,
		     doff:4,
			  fin:1,
			      syn:1,
				  rst:1,
				      psh:1,
					  ack:1,
					      urg:1,
						  res2:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
    __u16	doff:4,   /* data offset, in 32-bit words */
		     res1:4,
			  res2:2,
			       urg:1,
				   ack:1,
				       psh:1,
					   rst:1,
					       syn:1,
						   fin:1;
#else
#error	"Adjust your <asm/byteorder.h> defines"
#endif	
    __u16	window;    /* advertised receive window */
    __u16	check;     /* checksum */
    __u16	urg_ptr;   /* urgent pointer */
};


enum {
    TCP_ESTABLISHED = 1,
    TCP_SYN_SENT,
    TCP_SYN_RECV,
    TCP_FIN_WAIT1,
    TCP_FIN_WAIT2,
    TCP_TIME_WAIT,
    TCP_CLOSE,
    TCP_CLOSE_WAIT,
    TCP_LAST_ACK,
    TCP_LISTEN,
    TCP_CLOSING,	 /* now a valid state */

    TCP_MAX_STATES /* Leave at the end! */
};

#define TCP_STATE_MASK	0xF
#define TCP_ACTION_FIN	(1 << 7)

enum {
    TCPF_ESTABLISHED = (1 << 1),
    TCPF_SYN_SENT  = (1 << 2),
    TCPF_SYN_RECV  = (1 << 3),
    TCPF_FIN_WAIT1 = (1 << 4),
    TCPF_FIN_WAIT2 = (1 << 5),
    TCPF_TIME_WAIT = (1 << 6),
    TCPF_CLOSE     = (1 << 7),
    TCPF_CLOSE_WAIT = (1 << 8),
    TCPF_LAST_ACK  = (1 << 9),
    TCPF_LISTEN    = (1 << 10),
    TCPF_CLOSING   = (1 << 11) 
};

/*
 *	The union cast uses a gcc extension to avoid aliasing problems
 *  (union is compatible to any of its members)
 *  This means this part of the code is -fstrict-aliasing safe now.
 */
union tcp_word_hdr { 
    struct tcphdr hdr;
    __u32 		  words[5];
}; 

#define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3]) 

enum { 
    TCP_FLAG_URG = __constant_htonl(0x00200000), 
    TCP_FLAG_ACK = __constant_htonl(0x00100000), 
    TCP_FLAG_PSH = __constant_htonl(0x00080000), 
    TCP_FLAG_RST = __constant_htonl(0x00040000), 
    TCP_FLAG_SYN = __constant_htonl(0x00020000), 
    TCP_FLAG_FIN = __constant_htonl(0x00010000),
    TCP_RESERVED_BITS = __constant_htonl(0x0FC00000),
    TCP_DATA_OFFSET = __constant_htonl(0xF0000000)
}; 

/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from linux/socket.h */
typedef unsigned short sa_family_t;
 
struct sockaddr {
    sa_family_t	sa_family;	/* address family, AF_xxx	*/
    char		sa_data[14];	/* 14 bytes of protocol address	*/
};

struct msghdr {
    void	*	msg_name;	/* Socket name			*/
    int		msg_namelen;	/* Length of name		*/
    struct iovec *	msg_iov;	/* Data blocks			*/
    __kernel_size_t	msg_iovlen;	/* Number of blocks		*/
    void 	*	msg_control;	/* Per protocol magic           */
    __kernel_size_t	msg_controllen;	/* Length of cmsg list          */
    unsigned	msg_flags;
};

/*
 *	POSIX 1003.1g - ancillary data object information
 *	Ancillary data consists of a sequence of pairs of
 *	(cmsghdr, cmsg_data[])
 */
struct cmsghdr {
    __kernel_size_t	cmsg_len;	/* data byte count, including hdr */
    int		cmsg_level;	/* originating protocol */
    int		cmsg_type;	/* protocol-specific type */
};

/*
 *	Ancillary data object information MACROS
 *	Table 5-14 of POSIX 1003.1g
 */
#define __CMSG_NXTHDR(ctl, len, cmsg) __cmsg_nxthdr((ctl),(len),(cmsg))
#define CMSG_NXTHDR(mhdr, cmsg) cmsg_nxthdr((mhdr), (cmsg))

#define CMSG_ALIGN(len) ( ((len)+sizeof(long)-1) & ~(sizeof(long)-1) )

#define CMSG_DATA(cmsg)	((void *)((char *)(cmsg) + CMSG_ALIGN(sizeof(struct cmsghdr))))
#define CMSG_SPACE(len) (CMSG_ALIGN(sizeof(struct cmsghdr)) + CMSG_ALIGN(len))
#define CMSG_LEN(len) (CMSG_ALIGN(sizeof(struct cmsghdr)) + (len))

#define __CMSG_FIRSTHDR(ctl,len) ((len) >= sizeof(struct cmsghdr) ? \
				  (struct cmsghdr *)(ctl) : \
				  (struct cmsghdr *)NULL)
#define CMSG_FIRSTHDR(msg)	__CMSG_FIRSTHDR((msg)->msg_control, (msg)->msg_controllen)

/*
 *	Get the next cmsg header
 *
 *	PLEASE, do not touch this function. If you think, that it is
 *	incorrect, grep kernel sources and think about consequences
 *	before trying to improve it.
 *
 *	Now it always returns valid, not truncated ancillary object
 *	HEADER. But caller still MUST check, that cmsg->cmsg_len is
 *	inside range, given by msg->msg_controllen before using
 *	ancillary object DATA.				--ANK (980731)
 */
/*
 * Step from __cmsg to the following cmsghdr inside the __size-byte
 * ancillary-data buffer starting at __ctl; returns NULL once the next
 * header would no longer fit entirely within the buffer.
 */
extern __inline__ struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
						 struct cmsghdr *__cmsg)
{
    struct cmsghdr * __ptr;

    /* Advance past the current object, rounded up to long alignment. */
    __ptr = (struct cmsghdr*)(((unsigned char *) __cmsg) +  CMSG_ALIGN(__cmsg->cmsg_len));
    /* Not enough room left for a complete header -> no next cmsg. */
    if ((unsigned long)((char*)(__ptr+1) - (char *) __ctl) > __size)
	return NULL;

    return __ptr;
}

/*
 * Convenience wrapper: walk to the cmsghdr after __cmsg within __msg's
 * control buffer (NULL when exhausted).
 */
extern __inline__ struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg)
{
    struct cmsghdr *next;

    next = __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg);
    return next;
}

/* Socket types. */
#define SOCK_STREAM	1		/* stream (connection) socket	*/
#define SOCK_DGRAM	2		/* datagram (conn.less) socket	*/
#define SOCK_RAW        3

/* Supported address families. */
#define AF_UNSPEC	0
#define AF_INET		2	/* Internet IP Protocol 	*/

/* Protocol families, same as address families. */
#define PF_UNSPEC	AF_UNSPEC
#define PF_INET		AF_INET

/* Maximum queue length specifiable by listen.  */
#define SOMAXCONN	128

/* Flags we can use with send/ and recv. 
   Added those for 1003.1g not all are supported yet
 */
 
#define MSG_OOB		1
#define MSG_PEEK	2
#define MSG_DONTROUTE	4
#define MSG_TRYHARD     4       /* Synonym for MSG_DONTROUTE for DECnet */
#define MSG_CTRUNC	8
#define MSG_PROBE	0x10	/* Do not send. Only probe path f.e. for MTU */
#define MSG_TRUNC	0x20
#define MSG_DONTWAIT	0x40	/* Nonblocking io		 */
#define MSG_EOR         0x80	/* End of record */
#define MSG_WAITALL	0x100	/* Wait for a full request */
#define MSG_FIN         0x200
#define MSG_SYN		0x400
#define MSG_CONFIRM	0x800	/* Confirm path validity */
#define MSG_RST		0x1000
#define MSG_ERRQUEUE	0x2000	/* Fetch message from error queue */
#define MSG_NOSIGNAL	0x4000	/* Do not generate SIGPIPE */

#define MSG_EOF         MSG_FIN

/* Setsockoptions(2) level. Thanks to BSD these must match IPPROTO_xxx */
#define SOL_IP		0
#define SOL_SOCKET	1
#define SOL_TCP		6
#define SOL_UDP		17
#define SOL_RAW		255


/* TCP options - this way around because someone left a set in the c library includes */
#define TCP_NODELAY	1
#define TCP_MAXSEG	2
#define TCP_CORK	3	/* Linux specific (for use with sendfile) */
#define TCP_KEEPIDLE	4
#define TCP_KEEPINTVL	5
#define TCP_KEEPCNT	6
#define TCP_SYNCNT	7


extern int memcpy_fromiovecend(unsigned char *kdata, struct iovec *iov, 
			       int offset, int len);
extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
extern int csum_partial_copy_fromiovecend(unsigned char *kdata, 
                                          struct iovec *iov, 
                                          int offset, 
                                          unsigned int len, int *csump);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
/*****************************************************************************/

/*****************************************************************************/
/*** XXX KAF -- from linux/net.h */
typedef enum {
    SS_FREE = 0,				/* not allocated		*/
    SS_UNCONNECTED,			/* unconnected to any socket	*/
    SS_CONNECTING,			/* in process of connecting	*/
    SS_CONNECTED,				/* connected to socket		*/
    SS_DISCONNECTING			/* in process of disconnecting	*/
} socket_state;

#define SO_ACCEPTCON	(1<<16)		/* performed a listen		*/
#define SO_WAITDATA	(1<<17)		/* wait data to read		*/
#define SO_NOSPACE	(1<<18)		/* no space to write		*/

/* BSD-layer socket object: glue between the user-visible socket and the
 * protocol-private struct sock.  (See u_xxx mirror warning in the file
 * header before changing the layout.) */
struct socket
{
  socket_state		state;	/* SS_* connection state (see enum above) */
  unsigned long		flags;	/* SO_ACCEPTCON / SO_WAITDATA / SO_NOSPACE bits */
  struct sock		*sk;	/* protocol control block owned by this socket */
  wait_queue_head_t	wait;	/* waiters for state/data changes */
  short			type;	/* SOCK_STREAM / SOCK_DGRAM / SOCK_RAW */
  struct multipath_pcb* mpcb;	/* multipath extension PCB -- TODO confirm ownership/lifetime */
};

/*
 * New function -- KAF (17/12/99). Releases resources external to stack.
 */
struct user_pcb;
struct multipath_pcb;
struct mtcp_opt;
struct multipath_options;
extern void detach_sock_from_nic(struct user_pcb *pcb);
struct user_pcb *pcb_from_sock(struct socket *sock);
struct multipath_pcb* mpcb_from_sock(struct socket* sock);

struct scm_cookie;
struct vm_area_struct;

/* Registration record mapping an address family to its socket creator. */
struct net_proto_family 
{
    int	family;		/* AF_* / PF_* value this entry serves */
    int	(*create)(struct socket *sock, int protocol);	/* socket(2) back end */
};

//extern int	sock_wake_async(struct socket *sk, int how, int band);
#define net_ratelimit() 0 /* No rate limitation! */
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from linux/skbuff.h */
#define CHECKSUM_NONE 0
#define CHECKSUM_HW 1
#define CHECKSUM_UNNECESSARY 2

#ifdef __i386__
#define NET_CALLER(arg) (*(((void**)&arg)-1))
#else
#define NET_CALLER(arg) __builtin_return_address(0)
#endif

/* Circular doubly-linked queue of sk_buffs.  The head is overlaid on the
 * first two sk_buff pointers, so an empty queue points at itself. */
struct sk_buff_head {
    /* These two members must be first. */
    struct sk_buff	* next;
    struct sk_buff	* prev;

    __u32		qlen;	/* number of buffers currently queued */
    spinlock_t      lock;	/* guards the list in the *_irqsave wrappers */
};

/* Packet buffer.  Unlike stock Linux, this port carries TWO byte regions:
 * an "h" area for protocol headers and a "d" area for payload, manipulated
 * by the skb_*_h / skb_*_d helpers further down this file. */
struct sk_buff {
    /* These two members must be first. */
    struct sk_buff *next;		/* Next buffer in list 	*/
    struct sk_buff *prev;		/* Previous buffer in list */

    struct sk_buff_head *list;		/* List we are on	*/
    struct sock	  *sk;			/* Socket we are owned by */
    struct sock   *data_sk;             /* Socket the data fields owned by */
    struct net_device *dev;	
    struct net_device *rx_dev;

    /* Transport layer header */
    union
    {
        struct tcphdr   *th;
        struct udphdr   *uh;
        struct icmphdr  *icmph;
        unsigned char   *raw;
    } h;

    /* Network layer header */
    union
    {
        struct iphdr  *iph;
        unsigned char *raw;
    } nh;
  
    /* Link layer header */
    union 
    {	
        struct ethhdr *ethernet;
        unsigned char *raw;
    } mac;

    struct  dst_entry *dst;
    
  //used to be 48, added 4 bytes for mpath
    char	cb[56];		/* per-layer scratch (control buffer) */
    
    unsigned int 	len;	/* Length of actual data*/
    unsigned int	csum;	/* Checksum 		*/
    volatile char 	used;	/* Data moved to user and not MSG_PEEK */
    unsigned char	cloned, /* head may be cloned (check refcnt to be s).*/
        pkt_type,		/* Packet class		*/
        ip_summed,		/* Driver fed us an IP checksum	*/
        rx_buf;                 /* This is received data (KAF NEW) */ 
    __u32 priority;             /* Packet queuing priority */
    atomic_t	users;	        /* User count - see datagram.c,tcp.c 	*/

    unsigned short  protocol;   /* Packet protocol from driver.*/
    unsigned int    truesize;	/* Buffer size 		*/

    u_char *d_head, *d_data, *d_tail, *d_end;	/* payload ("data") area bounds */
    u_char *h_head, *h_data, *h_tail, *h_end;	/* header area bounds */

    void 		(*destructor)(struct sk_buff *);	/* run by skb_orphan/__kfree_skb */
};

extern void			__kfree_skb(struct sk_buff *skb);
extern void			skb_queue_head_init(struct sk_buff_head *list);
extern void			skb_queue_head(struct sk_buff_head *list,struct sk_buff *buf);
extern void			skb_queue_tail(struct sk_buff_head *list,struct sk_buff *buf);
void skb_queue_order(struct sk_buff_head* list,struct sk_buff* skb);

extern struct sk_buff *		skb_dequeue(struct sk_buff_head *list);
extern void 			skb_insert(struct sk_buff *old,struct sk_buff *newsk);
extern __u32			skb_queue_len(struct sk_buff_head *list);
extern struct sk_buff *		skb_peek_copy(struct sk_buff_head *list);
extern struct sk_buff *		alloc_skb(struct sock *sk, unsigned int size);
extern void			kfree_skbmem(struct sock *sk, 
                                             struct sk_buff *skb);
extern struct sk_buff *		skb_clone(struct sk_buff *skb, int priority);
extern struct sk_buff *		skb_copy(struct sk_buff *skb, int priority);
extern struct sk_buff *		skb_realloc_headroom(struct sk_buff *skb, int newheadroom);
#define dev_kfree_skb(a)	kfree_skb(a)
extern void	skb_over_panic(struct sk_buff *skb, int len, char *fn);
extern void	skb_under_panic(struct sk_buff *skb, int len, char *fn);

/* Internal */
extern __inline__ atomic_t *skb_datarefp(struct sk_buff *skb)
{
    /* The data-area reference count is stored just past d_end --
     * presumably reserved there by alloc_skb(); TODO confirm. */
    return (atomic_t *)(skb->d_end);
}

extern __inline__ int skb_queue_empty(struct sk_buff_head *list)
{
    /* Empty queue: the circular list's head points back at itself. */
    return list->next == (struct sk_buff *)list;
}

extern __inline__ void kfree_skb(struct sk_buff *skb)
{
    /* Drop one reference; only the last user actually frees the buffer. */
    if (atomic_dec_and_test(&skb->users))
	__kfree_skb(skb);
}

extern __inline__ int skb_cloned(struct sk_buff *skb)
{
    /* True when the data area is shared: the buffer was cloned AND the
     * data refcount shows more than one holder. */
    return skb->cloned && atomic_read(skb_datarefp(skb)) != 1;
}

extern __inline__ int skb_shared(struct sk_buff *skb)
{
    /* More than one holder of the sk_buff structure itself. */
    return atomic_read(&skb->users) != 1;
}

extern __inline__ struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
{
    /* Ensure the caller holds a private sk_buff: if it is shared, hand
     * back a clone and release our reference on the original.  May return
     * NULL if cloning fails. */
    struct sk_buff *result = skb;

    if (skb_shared(skb)) {
	result = skb_clone(skb, pri);
	kfree_skb(skb);
    }
    return result;
}

/*
 *	Peek an sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. For an interrupt
 *	type system cli() peek the buffer copy the data and sti();
 */
 
extern __inline__ struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
    /* First buffer on the queue without removing it; NULL when empty.
     * The buffer stays on the list -- see warning above. */
    struct sk_buff *head = (struct sk_buff *)list_;
    struct sk_buff *first = head->next;

    return (first == head) ? NULL : first;
}

extern __inline__ struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
    /* Last buffer on the queue without removing it; NULL when empty. */
    struct sk_buff *head = (struct sk_buff *)list_;
    struct sk_buff *last = head->prev;

    return (last == head) ? NULL : last;
}

/*
 *	Return the length of an sk_buff queue
 */
 
extern __inline__ __u32 skb_queue_len(struct sk_buff_head *list_)
{
    /* Cached element count; maintained by the queue/dequeue helpers. */
    return(list_->qlen);
}

extern __inline__ void skb_queue_head_init(struct sk_buff_head *list)
{
    /* Initialise an empty circular queue: head links point at the head
     * itself and the length counter starts at zero. */
    spin_lock_init(&list->lock);
    list->next = (struct sk_buff *)list;
    list->prev = (struct sk_buff *)list;
    list->qlen = 0;
}

/*
 *	Insert an sk_buff at the start of a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */

extern __inline__ void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
    /* Unlocked insert at the front of the queue (caller serialises). */
    struct sk_buff *head = (struct sk_buff *)list;
    struct sk_buff *first = head->next;

    newsk->list = list;
    newsk->prev = head;
    newsk->next = first;
    first->prev = newsk;
    head->next = newsk;
    list->qlen++;
}

extern __inline__ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
    /* Locked front-insert: takes the queue lock around __skb_queue_head(). */
    unsigned long flags;

    spin_lock_irqsave(&list->lock, flags);
    __skb_queue_head(list, newsk);
    spin_unlock_irqrestore(&list->lock, flags);
}

/*
 *	Insert an sk_buff at the end of a list.
 */

extern __inline__ void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
    /* Unlocked insert at the back of the queue (caller serialises). */
    struct sk_buff *head = (struct sk_buff *)list;
    struct sk_buff *last = head->prev;

    newsk->list = list;
    newsk->prev = last;
    newsk->next = head;
    last->next = newsk;
    head->prev = newsk;
    list->qlen++;
}

extern __inline__ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
    /* Locked tail-insert: takes the queue lock around __skb_queue_tail(). */
    unsigned long flags;

    spin_lock_irqsave(&list->lock, flags);
    __skb_queue_tail(list, newsk);
    spin_unlock_irqrestore(&list->lock, flags);
}

/*
 *	Remove an sk_buff from a list.
 */

extern __inline__ struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
    /* Unlocked removal of the first buffer; returns NULL when the queue
     * is empty.  The removed buffer is fully unlinked (next/prev/list
     * cleared) so it can be queued elsewhere. */
    struct sk_buff *next, *prev, *result;

    prev = (struct sk_buff *) list;
    next = prev->next;
    result = NULL;
    if (next != prev) {		/* non-empty: head does not point at itself */
	result = next;
	next = next->next;
	list->qlen--;
	next->prev = prev;
	prev->next = next;
	result->next = NULL;
	result->prev = NULL;
	result->list = NULL;
    }
    return result;
}

extern __inline__ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
    /* Locked removal of the first buffer; NULL when the queue is empty.
     *
     * BUGFIX: `flags` was declared plain `long`.  spin_lock_irqsave()
     * stores saved IRQ state in an `unsigned long`, and every sibling
     * wrapper in this file (skb_queue_head, skb_queue_tail, skb_insert)
     * already uses `unsigned long flags`. */
    unsigned long flags;
    struct sk_buff *result;

    spin_lock_irqsave(&list->lock, flags);
    result = __skb_dequeue(list);
    spin_unlock_irqrestore(&list->lock, flags);
    return result;
}

/*
 *	Insert a packet on a list.
 */

extern __inline__ void __skb_insert(struct sk_buff *newsk,
				    struct sk_buff * prev, struct sk_buff *next,
				    struct sk_buff_head * list)
{
    /* Splice newsk between prev and next (unlocked) and account for it. */
    newsk->list = list;
    newsk->prev = prev;
    newsk->next = next;
    prev->next = newsk;
    next->prev = newsk;
    list->qlen++;
}

/*
 *	Place a packet before a given packet in a list
 */
extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
    /* Insert newsk immediately BEFORE old, on old's queue, under that
     * queue's lock.  old must already be on a list. */
    unsigned long flags;

    spin_lock_irqsave(&old->list->lock, flags);
    __skb_insert(newsk, old->prev, old, old->list);
    spin_unlock_irqrestore(&old->list->lock, flags);
}

/*
 *	Place a packet after a given packet in a list.
 */

extern __inline__ void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
    /* Unlocked insert of newsk immediately AFTER old on old's queue. */
    __skb_insert(newsk, old, old->next, old->list);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known..
 */
extern __inline__ void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
    /* Remove skb from `list` (unlocked -- see comment above); skb is left
     * fully detached.  NOTE(review): skb is assumed to actually be on
     * `list`; nothing checks skb->list == list. */
    struct sk_buff * next, * prev;

    list->qlen--;
    next = skb->next;
    prev = skb->prev;
    skb->next = NULL;
    skb->prev = NULL;
    skb->list = NULL;
    next->prev = prev;
    prev->next = next;
}


/* XXX: more streamlined implementation */
extern __inline__ struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
    struct sk_buff *skb = skb_peek_tail(list); 
    if (skb)
	__skb_unlink(skb, list);
    return skb;
}




/*
 *	Add data to an sk_buff header
 */
 
extern __inline__ unsigned char *__skb_put_h(struct sk_buff *skb, unsigned int len)
{
    /* Extend the HEADER area tail by len bytes; returns a pointer to the
     * newly exposed region.  NOTE(review): the bounds ASSERT fires only
     * after h_tail has already been advanced. */
    unsigned char *tmp=skb->h_tail;
    skb->h_tail+=len;
    skb->len+=len;
    ASSERT(skb->h_tail<=skb->h_end);
    return tmp;
}
#define skb_put_h(skb, len) __skb_put_h(skb, len)

extern __inline__ unsigned char *__skb_push_h(struct sk_buff *skb, unsigned int len)
{
    /* Grow the HEADER area downward by len bytes (prepend space) and
     * return the new start of header data. */
    skb->h_data-=len;
    skb->len+=len;
    ASSERT(skb->h_data>=skb->h_head);
    return skb->h_data;
}
#define skb_push_h(skb, len) __skb_push_h(skb, len)

extern __inline__ unsigned char *__skb_pull_h(struct sk_buff *skb, unsigned int len)
{
    /* Consume len bytes from the front of the HEADER area; returns the
     * advanced data pointer.  NOTE(review): the ASSERT checks against
     * h_end rather than h_tail -- looks intentional but verify against
     * callers. */
    skb->len-=len;
    ASSERT((skb->h_data+len)<=skb->h_end); 
    return 	skb->h_data+=len;
}

extern __inline__ unsigned char * skb_pull_h(struct sk_buff *skb, unsigned int len)
{	
    /* Checked variant of __skb_pull_h: NULL if fewer than len header
     * bytes remain.  (len is unsigned, the pointer difference is signed;
     * h_tail >= h_data is assumed to hold.) */
    if (len > (skb->h_tail - skb->h_data)) return NULL;
    return __skb_pull_h(skb,len);
}

extern __inline__ int skb_h_headroom(struct sk_buff *skb)
{
    /* Free bytes available before the header data (for skb_push_h). */
    return skb->h_data-skb->h_head;
}

extern __inline__ int skb_h_tailroom(struct sk_buff *skb)
{
    /* Free bytes available after the header data (for skb_put_h). */
    return skb->h_end-skb->h_tail;
}

extern __inline__ void skb_reserve_h(struct sk_buff *skb, unsigned int len)
{
    /* Reserve len bytes of header headroom on an (empty) header area by
     * advancing both data and tail; skb->len is unchanged. */
    skb->h_data+=len;
    skb->h_tail+=len;
    ASSERT(skb->h_tail<=skb->h_end);
}

extern __inline__ void __skb_trim_h(struct sk_buff *skb, unsigned int len)
{
    /* Force the header area (and skb->len) to exactly len bytes.
     * NOTE(review): overwrites skb->len rather than subtracting the
     * trimmed amount -- differs from the stock-Linux single-area skb. */
    skb->len = len;
    skb->h_tail = skb->h_data+len;
    ASSERT(skb->h_tail<=skb->h_end );
}

extern __inline__ void skb_trim_h(struct sk_buff *skb, unsigned int len)
{
    /* Shrink the header area to len bytes; a no-op if already shorter. */
    if ( (skb->h_tail - skb->h_data) > len ) __skb_trim_h(skb, len);
}


/*
 *	Add data to an sk_buff data area
 */
 
extern __inline__ unsigned char *__skb_put_d(struct sk_buff *skb, unsigned int len)
{
    /* Extend the DATA (payload) area tail by len bytes; returns a pointer
     * to the newly exposed region.  Mirrors __skb_put_h for the d_ area. */
    unsigned char *tmp=skb->d_tail;
    skb->d_tail+=len;
    skb->len+=len;
    ASSERT(skb->d_tail<=skb->d_end );
    return tmp;
}
#define skb_put_d(skb, len) __skb_put_d(skb, len)

extern __inline__ unsigned char *__skb_push_d(struct sk_buff *skb, unsigned int len)
{
    /* Grow the DATA area downward by len bytes and return its new start. */
    skb->d_data-=len;
    skb->len+=len;
    ASSERT(skb->d_data>=skb->d_head );
    return skb->d_data;
}
#define skb_push_d(skb, len) __skb_push_d(skb, len)

extern __inline__ unsigned char *__skb_pull_d(struct sk_buff *skb, unsigned int len)
{
    /* Consume len bytes from the front of the DATA area; returns the
     * advanced data pointer.  NOTE(review): ASSERT checks against d_end,
     * not d_tail -- same pattern as __skb_pull_h. */
    skb->len-=len;
    ASSERT((skb->d_data+len)<=skb->d_end);
    return 	skb->d_data+=len;
}

extern __inline__ unsigned char * skb_pull_d(struct sk_buff *skb, unsigned int len)
{	
    /* Checked variant of __skb_pull_d: NULL if fewer than len payload
     * bytes remain. */
    if ( len > (skb->d_tail - skb->d_data) ) return NULL;
    return __skb_pull_d(skb,len);
}

extern __inline__ int skb_d_headroom(struct sk_buff *skb)
{
    /* Free bytes available before the payload data (for skb_push_d). */
    return skb->d_data-skb->d_head;
}

extern __inline__ int skb_d_tailroom(struct sk_buff *skb)
{
    /* Free bytes available after the payload data (for skb_put_d). */
    return skb->d_end-skb->d_tail;
}

extern __inline__ void skb_reserve_d(struct sk_buff *skb, unsigned int len)
{
    /* Reserve len bytes of payload headroom by advancing data and tail;
     * skb->len is unchanged. */
    skb->d_data+=len;
    skb->d_tail+=len;
    ASSERT(skb->d_tail<=skb->d_end );
}

extern __inline__ void __skb_trim_d(struct sk_buff *skb, unsigned int len)
{
    /* Force the payload area (and skb->len) to exactly len bytes.
     * NOTE(review): overwrites skb->len outright, like __skb_trim_h. */
    skb->len = len;
    skb->d_tail = skb->d_data+len;
    ASSERT(skb->d_tail<=skb->d_end );
}

extern __inline__ void skb_trim_d(struct sk_buff *skb, unsigned int len)
{
    /* Shrink the payload area to len bytes; a no-op if already shorter. */
    if ( (skb->d_tail - skb->d_data) > len) __skb_trim_d(skb, len);
}






extern __inline__ void skb_orphan(struct sk_buff *skb)
{
    /* Sever the buffer from its owning socket: run the destructor (which
     * normally releases accounted memory) and clear the back-pointers. */
    if (skb->destructor != NULL)
	skb->destructor(skb);
    skb->destructor = NULL;
    skb->sk = NULL;
}

extern __inline__ void skb_queue_purge(struct sk_buff_head *list)
{
    /* Drain the queue (locked dequeue), releasing every buffer. */
    struct sk_buff *skb;

    for (skb = skb_dequeue(list); skb != NULL; skb = skb_dequeue(list))
	kfree_skb(skb);
}

extern __inline__ void __skb_queue_purge(struct sk_buff_head *list)
{
    /* Unlocked variant of skb_queue_purge (caller serialises). */
    struct sk_buff *skb;

    for (skb = __skb_dequeue(list); skb != NULL; skb = __skb_dequeue(list))
	kfree_skb(skb);
}

extern __inline__ struct sk_buff *dev_alloc_skb(struct sock *sk, 
                                                unsigned int length)
{
    /* Driver-side allocation with 16 bytes of link-level headroom.
     * NOTE(review): the panic() fires unconditionally -- this path is
     * deliberately unfinished in the user-space port; the code after it
     * is unreachable until the panic is removed. */
    struct sk_buff *skb;
    panic("dev_alloc_skb: need to think about where we get skb from");
    skb = alloc_skb(sk, length+16);
    /*
     * KAF: reserve space for hardware header => skb_reserve_h.
     */
    if ( skb ) skb_reserve_h(skb,16);
    return skb;
}

extern __inline__ struct sk_buff *
skb_cow(struct sk_buff *skb, unsigned int headroom)
{
    /* Copy-on-write helper: guarantee at least `headroom` bytes of header
     * headroom on a privately-owned buffer.  Reallocates (and frees the
     * original) when headroom is short or the data area is cloned.
     * May return NULL if skb_realloc_headroom() fails. */
    headroom = (headroom+15)&~15;	/* round up to a 16-byte multiple */

    /*
     * Only called from ip_frag, where we are interested in space in
     * header => skb_h_headroom. (KAF)
     */
    if ((unsigned)skb_h_headroom(skb) < headroom || skb_cloned(skb)) {
	struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
	kfree_skb(skb);
	skb = skb2;
    }
    return skb;
}

extern struct sk_buff *		skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
extern int			skb_copy_datagram_iovec(struct sk_buff *from, int offset, struct iovec *to,int size);
extern void			skb_free_datagram(struct sock * sk, struct sk_buff *skb);
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from net/neighbour.h */
/* Minimal stand-in for the kernel neighbour (ARP) entry: only the output
 * hook the TCP/IP code actually calls is kept. */
struct neighbour
{
    int (*output)(struct sk_buff *skb);	/* transmit via this neighbour */
};
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from net/dst.h */
#define RT_CACHE_DEBUG		0

#define DST_GC_MIN	(1*HZ)
#define DST_GC_INC	(5*HZ)
#define DST_GC_MAX	(120*HZ)

struct sk_buff;

int multipath_scheduler(struct multipath_pcb* mpcb);


/* Destination cache entry: per-route state (PMTU, RTT estimates, output
 * hooks) shared by sockets sending to the same destination. */
struct dst_entry
{
    struct dst_entry        *next;
    atomic_t		__refcnt;	/* client references	*/
    int			__use;
    struct net_device	        *dev;
    int			obsolete;	/* >1 => entry already scheduled for death */
    unsigned long		lastuse;
    unsigned long		expires;	/* 0 => never expires (see dst_set_expires) */
    unsigned		mxlock;
    unsigned		pmtu;
    unsigned		window;
    unsigned		rtt;
    unsigned		rttvar;
    unsigned		ssthresh;
    unsigned		cwnd;
    unsigned		advmss;
    unsigned long		rate_last;	/* rate limiting for ICMP */
    unsigned long		rate_tokens;

    int			error;

    struct neighbour	*neighbour;
    struct hh_cache		*hh;

    int			(*input)(struct sk_buff*);	/* receive path hook */
    int			(*output)(struct sk_buff*);	/* transmit path hook */

    struct  dst_ops	        *ops;	/* per-protocol operations table */
		
    char			info[0];	/* protocol-private trailer (old-style flexible array) */
};


/* Per-protocol operations and bookkeeping for destination cache entries. */
struct dst_ops
{
    unsigned short		family;		/* AF_* this table serves */
    unsigned short		protocol;
    unsigned		gc_thresh;	/* entry count that triggers gc() */

    int			(*gc)(void);
    struct dst_entry *	(*check)(struct dst_entry *, __u32 cookie);
    struct dst_entry *	(*reroute)(struct dst_entry *,
				   struct sk_buff *);
    void			(*destroy)(struct dst_entry *);
    struct dst_entry *	(*negative_advice)(struct dst_entry *);
    void			(*link_failure)(struct sk_buff *);
    int			entry_size;	/* bytes to allocate per entry (incl. info[]) */

    atomic_t		entries;	/* live entry count */
    kmem_cache_t 		*kmem_cachep;
};

extern __inline__ void dst_hold(struct dst_entry * dst)
{
    /* Take an additional reference; dst must be non-NULL. */
    atomic_inc(&dst->__refcnt);
}

extern __inline__ struct dst_entry * dst_clone(struct dst_entry * dst)
{
    if ( dst ) atomic_inc(&dst->__refcnt);
    return dst;
}

extern __inline__ void dst_release(struct dst_entry * dst)
{
    if ( dst ) atomic_dec(&dst->__refcnt);
}

extern void * dst_alloc(struct dst_ops * ops);
extern void __dst_free(struct dst_entry * dst);
extern void dst_destroy(struct dst_entry * dst);

extern __inline__ void dst_free(struct dst_entry * dst)
{
    /* Retire a dst entry: already-obsolete entries are left alone,
     * unreferenced ones are destroyed immediately, and still-referenced
     * ones are handed to the deferred-free path. */
    if ( dst->obsolete > 1 ) return;
    if ( !atomic_read(&dst->__refcnt) ) 
    {
	dst_destroy(dst);
	return;
    }
    __dst_free(dst);
}

extern __inline__ void dst_confirm(struct dst_entry *dst)
{
    /* Tell the neighbour layer this route just worked (NULL-tolerant). */
    if ( dst ) neigh_confirm(dst->neighbour);
}

extern __inline__ void dst_negative_advice(struct dst_entry **dst_p)
{
    /* Deliberately a no-op in this port: the real negative-advice hook is
     * compiled out with #if 0.  Kept so callers need no changes. */
#if 0
    struct dst_entry * dst = *dst_p;
    if (dst && dst->ops->negative_advice)
	*dst_p = dst->ops->negative_advice(dst);
#endif
}

extern __inline__ void dst_link_failure(struct sk_buff *skb)
{
    /* Report a transmit failure on skb's route via the protocol hook,
     * if every link in the chain (dst, ops, hook) is present. */
    struct dst_entry * dst = skb->dst;
    if (dst && dst->ops && dst->ops->link_failure)
	dst->ops->link_failure(skb);
}

extern __inline__ void dst_set_expires(struct dst_entry *dst, int timeout)
{
    /* Set/advance the entry's expiry to `timeout` ticks from now, only if
     * it would expire SOONER than the current deadline.  0 means "never
     * expires", so a computed 0 is nudged to 1. */
    unsigned long expires = jiffies + timeout;

    if (expires == 0)
	expires = 1;

    if (dst->expires == 0 || (long)(dst->expires - expires) > 0)
	dst->expires = expires;
}

extern void		dst_init(void);
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from {linux|net}/icmp.h */ 
#define ICMP_ECHOREPLY		0	/* Echo Reply			*/
#define ICMP_DEST_UNREACH	3	/* Destination Unreachable	*/
#define ICMP_SOURCE_QUENCH	4	/* Source Quench		*/
#define ICMP_REDIRECT		5	/* Redirect (change route)	*/
#define ICMP_ECHO		8	/* Echo Request			*/
#define ICMP_TIME_EXCEEDED	11	/* Time Exceeded		*/
#define ICMP_PARAMETERPROB	12	/* Parameter Problem		*/
#define ICMP_TIMESTAMP		13	/* Timestamp Request		*/
#define ICMP_TIMESTAMPREPLY	14	/* Timestamp Reply		*/
#define ICMP_INFO_REQUEST	15	/* Information Request		*/
#define ICMP_INFO_REPLY		16	/* Information Reply		*/
#define ICMP_ADDRESS		17	/* Address Mask Request		*/
#define ICMP_ADDRESSREPLY	18	/* Address Mask Reply		*/
#define NR_ICMP_TYPES		18


/* Codes for UNREACH. */
#define ICMP_NET_UNREACH	0	/* Network Unreachable		*/
#define ICMP_HOST_UNREACH	1	/* Host Unreachable		*/
#define ICMP_PROT_UNREACH	2	/* Protocol Unreachable		*/
#define ICMP_PORT_UNREACH	3	/* Port Unreachable		*/
#define ICMP_FRAG_NEEDED	4	/* Fragmentation Needed/DF set	*/
#define ICMP_SR_FAILED		5	/* Source Route failed		*/
#define ICMP_NET_UNKNOWN	6
#define ICMP_HOST_UNKNOWN	7
#define ICMP_HOST_ISOLATED	8
#define ICMP_NET_ANO		9
#define ICMP_HOST_ANO		10
#define ICMP_NET_UNR_TOS	11
#define ICMP_HOST_UNR_TOS	12
#define ICMP_PKT_FILTERED	13	/* Packet filtered */
#define ICMP_PREC_VIOLATION	14	/* Precedence violation */
#define ICMP_PREC_CUTOFF	15	/* Precedence cut off */
#define NR_ICMP_UNREACH		15	/* instead of hardcoding immediate value */

/* Codes for REDIRECT. */
#define ICMP_REDIR_NET		0	/* Redirect Net			*/
#define ICMP_REDIR_HOST		1	/* Redirect Host		*/
#define ICMP_REDIR_NETTOS	2	/* Redirect Net for TOS		*/
#define ICMP_REDIR_HOSTTOS	3	/* Redirect Host for TOS	*/

/* Codes for TIME_EXCEEDED. */
#define ICMP_EXC_TTL		0	/* TTL count exceeded		*/
#define ICMP_EXC_FRAGTIME	1	/* Fragment Reass time exceeded	*/


/* On-the-wire ICMP header (RFC 792 layout); the union covers the
 * type-specific "rest of header" word. */
struct icmphdr {
    __u8		type;		/* ICMP_* message type */
    __u8		code;		/* subtype, e.g. ICMP_FRAG_NEEDED */
    __u16		checksum;	/* ones-complement over the message */
    union {
	struct {
	    __u16	id;
	    __u16	sequence;
	} echo;				/* echo request/reply */
	__u32	gateway;		/* redirect target */
	struct {
	    __u16	__unused;
	    __u16	mtu;		/* next-hop MTU for FRAG_NEEDED */
	} frag;
    } un;
};

/* Mapping from an ICMP unreach code to a socket errno (see
 * icmp_err_convert[]) plus whether the error is fatal to the connection. */
struct icmp_err {
    int		errno;		/* errno to report to the socket */
    unsigned	fatal:1;	/* 1 => abort the connection */
};

/*
 *	constants for (set|get)sockopt
 */ 
#define ICMP_FILTER			1

/* Raw-socket ICMP type filter (ICMP_FILTER sockopt): bit N set presumably
 * suppresses delivery of ICMP type N -- TODO confirm against raw.c. */
struct icmp_filter {
    __u32		data;
};  

extern struct icmp_err icmp_err_convert[];
extern struct icmp_mib icmp_statistics;

#define icmp_send(a,b,c,d) 0
//extern void	icmp_send(struct sk_buff *skb_in,  int type, int code,
//			  unsigned long info);
extern int	icmp_rcv(struct sk_buff *skb, unsigned short len);
extern int	icmp_ioctl(struct sock *sk, int cmd, unsigned long arg);
//extern void	icmp_init(struct net_proto_family *ops);
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from net/sock.h */
/* Per-socket state for raw IPv4 sockets: just the ICMP type filter. */
struct raw_opt {
    struct icmp_filter	filter;
};

/* Per-socket IPv4 options (sock.protinfo.af_inet): IP-level sockopt
 * settings and multicast membership. */
struct inet_opt
{
    int			ttl;			/* TTL setting */
    int			tos;			/* TOS */
    unsigned	   	cmsg_flags;		/* which IP_* cmsgs to deliver */
    struct ip_options	*opt;			/* IP options to send */
    unsigned char		hdrincl;		/* Include headers ? */
    __u8			mc_ttl;			/* Multicasting TTL */
    __u8			mc_loop;		/* Loopback */
    __u8			recverr;		/* deliver errors via error queue */
    __u8			pmtudisc;		/* path-MTU discovery mode */
    int			mc_index;		/* Multicast device index */
    __u32			mc_addr;		/* multicast source address */
    struct ip_mc_socklist	*mc_list;		/* Group array */
};

/* This defines a selective acknowledgement block. */
/* This defines a selective acknowledgement block. */
struct tcp_sack_block {
    __u32	start_seq;	/* first sequence number covered (inclusive) */
    __u32	end_seq;	/* sequence number just past the block */
};

extern int multcp_N; 
extern int sqrt_multcp_N; 

#define DECL_ID

#ifdef DECL_ID
int tp_count; 
#endif

#ifdef CIRC
#include "circ.h"
#endif


#ifdef PLOT_CWND
__u32 old_snd_cwnd; 
#endif

/* Per-connection TCP state (sock.tp_pinfo.af_tcp): RFC793 sequence
 * variables, RTT/congestion state, timers, queues and negotiated options.
 * Extended by this port with SACK bookkeeping and multipath/experiment
 * fields (id, *_drop_cwnd, new_sacks, ...).  Remember the u_xxx mirror
 * warning in the file header before changing the layout. */
struct tcp_opt {

#ifdef DECL_ID
  int id; 	/* debug/trace identifier for this connection (DECL_ID builds) */
#endif
  
  int	tcp_header_len;	/* Bytes of tcp header to send		*/

/*
 *	Header prediction flags
 *	0x5?10 << 16 + snd_wnd in net byte order
 */
    __u32	pred_flags;

/*
 *	RFC793 variables by their proper names. This means you can
 *	read the code and the spec side by side (and laugh ...)
 *	See RFC793 and RFC1122. The RFC writes these in capitals.
 */
    __u32	rcv_nxt;	/* What we want to receive next 	*/
    __u32	snd_nxt;	/* Next sequence we send		*/

    __u32	snd_una;	/* First byte we want an ack for	*/
    __u32	rcv_tstamp;	/* timestamp of last received packet	*/
    __u32	lrcvtime;	/* timestamp of last received data packet*/
    __u32	srtt;		/* smothed round trip time << 3		*/

    __u32	ato;		/* delayed ack timeout			*/
    __u32	snd_wl1;	/* Sequence for window update		*/

    __u32	snd_wl2;	/* Ack sequence for update		*/
    __u32	snd_wnd;	/* The window we expect to receive	*/
    __u32	max_window;
    __u32	pmtu_cookie;	/* Last pmtu seen by socket		*/
    __u16	mss_cache;	/* Cached effective mss, not including SACKS */
    __u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
    __u16	ext_header_len;	/* Dave, do you allow mw to use this hole? 8) --ANK */
    __u8	pending;	/* pending events			*/
    __u8	retransmits;
    __u32	last_ack_sent;	/* last ack we sent			*/

    __u32	backoff;	/* backoff				*/
    __u32	mdev;		/* medium deviation			*/
    __u32	snd_cwnd;	/* Sending congestion window		*/
  
  __u32 last_drop_cwnd;		/* cwnd at the most recent loss event */
  __u32 mean_drop_cwnd;		/* running average of cwnd at loss -- TODO confirm in tcp.c */
  __u32 last_drop_seq;		/* sequence number at the most recent loss */

    __u32	rto;		/* retransmit timeout			*/

    __u32	packets_out;	/* Packets which are "in flight"	*/
    __u32	fackets_out;	/* Non-retrans SACK'd packets		*/
    __u32	retrans_out;	/* Fast-retransmitted packets out	*/
    __u32	high_seq;	/* snd_nxt at onset of congestion	*/

/*
 *	Slow start and congestion control (see also Nagle, and Karn & Partridge)
 */
    __u32	snd_ssthresh;	/* Slow start size threshold		*/
    __u16	snd_cwnd_cnt;	/* Linear increase counter		*/
    __u16	snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
    __u32	dup_acks;	/* Consequetive duplicate acks seen from other end */

  __u8 in_fast_recovery;	/* non-zero while in fast recovery */

    __u8	delayed_acks;
    __u16	user_mss;  	/* mss requested by user in ioctl */

    /* Two commonly used timers in both sender and receiver paths. */
    struct timer_list	retransmit_timer;	/* Resend (no ack)	*/
    struct timer_list	delack_timer;		/* Ack delay 		*/

    struct sk_buff_head	out_of_order_queue; /* Out of order segments go here */

    struct tcp_func		*af_specific;	/* Operations which are AF_INET{4,6} specific	*/
    struct sk_buff		*send_head;	/* Front of stuff to transmit			*/
    struct sk_buff		*retrans_head;	/* retrans head can be 
						 * different to the head of
						 * write queue if we are doing
						 * fast retransmit
						 */

    __u32	rcv_wnd;	/* Current receiver window		*/
    __u32	rcv_wup;	/* rcv_nxt on last window update sent	*/
    __u32	write_seq;
    __u32	copied_seq;
/*
 *      Options received (usually on last packet, some only on SYN packets).
 */
  char	tstamp_ok,	/* TIMESTAMP seen on SYN packet		*/
    wscale_ok,	/* Wscale seen on SYN packet		*/
    sack_ok;	/* SACK seen on SYN packet		*/
  char	saw_tstamp;	/* Saw TIMESTAMP on last packet		*/
  __u8	snd_wscale;	/* Window scaling received from sender	*/
  __u8	rcv_wscale;	/* Window scaling to send to receiver	*/
  __u8	rexmt_done;	/* Retransmitted up to send head?	*/
  __u32	rcv_tsval;	/* Time stamp value             	*/
  __u32	rcv_tsecr;	/* Time stamp echo reply        	*/
  __u32	ts_recent;	/* Time stamp to echo next		*/
  long	ts_recent_stamp;/* Time we stored ts_recent (for aging) */
  int	num_sacks;	/* Number of SACK blocks		*/

  struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/

  struct tcp_sack_block new_sacks[10]; // the new packets that are acked by the current sack block... 
  int num_new_sacks; 

  __u32 old_ack; // The cumulative ack from previous time... 
  
  struct timer_list	probe_timer;		/* Probes	*/
  __u32	window_clamp;	/* XXX Document this... -DaveM		*/
  __u32	probes_out;	/* unanswered 0 window probes		*/
  __u32	syn_seq;
    __u32	fin_seq;
    __u32	urg_seq;
    __u32	urg_data;

    __u32	last_seg_size;	/* Size of last incoming segment */
    __u32	rcv_mss;	/* MSS used for delayed ACK decisions */ 

    struct open_request	*syn_wait_queue;
    struct open_request	**syn_wait_last;

    int syn_backlog;	/* Backlog of received SYNs */
    int write_pending;
	
    unsigned int		keepalive_time;	  /* time before keep alive takes place */
    unsigned int		keepalive_intvl;  /* time interval between keep alive probes */
    unsigned char  		keepalive_probes; /* num of allowed keep alive probes */
    unsigned char		syn_retries;	  /* num of allowed syn retries */
};

 	
/* Define this to get the sk->debug debugging facility. */
#ifndef NDEBUG
/*
 * BUGFIX: this previously read `printk(KERN_DEBUG ## msg)`.  `##` is the
 * token-paste operator; gluing the KERN_DEBUG identifier onto the first
 * token of the format string produces an invalid preprocessing token and
 * fails to compile wherever the macro is used.  The intended idiom (as in
 * the real linux/net/sock.h) is string-literal juxtaposition:
 * `printk(KERN_DEBUG msg)`.
 */
#define SOCK_DEBUG(sk, msg...) do { /*if((sk) && ((sk)->debug))*/ printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif

/* User-space replacement for the kernel socket lock: a Pth mutex plus a
 * user-depth counter (mirrors lock_sock()'s owner/users scheme). */
typedef struct {
    pth_mutex_t         m;		/* the actual mutex */
    unsigned int	users;		/* process-context lock depth */
} socket_lock_t;

/* Network-layer representation of a socket: demux keys, hash linkage,
 * buffer accounting, queues, protocol-private unions and callbacks.
 * Layout warning: the fields up to the marked comment must stay in sync
 * with tcp_tw_bucket, and the u_xxx mirror in ipv4.h must track changes
 * (see file header). */
struct sock {
    /* Socket demultiplex comparisons on incoming packets. */

    __u32			daddr;		/* Foreign IPv4 addr	 */
    __u32			rcv_saddr;	/* Bound local IPv4 addr */
    __u16			dport;		/* Destination port	 */
    unsigned short		num;		/* Local port		 */

    int			bound_dev_if;	/* Bound device index if != 0	 */

    /* Main hash linkage for various protocol lookup tables. */
    struct sock		*next;
    struct sock		**pprev;
    struct sock		*bind_next;
    struct sock		**bind_pprev;

    volatile unsigned char	state,		/* Connection state	*/
        zapped;		/* In ax25 & ipx means not linked	*/
    __u16			sport;		/* Source port		*/
    unsigned short		family;		/* Address family	*/
    unsigned char		reuse,		/* SO_REUSEADDR setting	*/
        nonagle;	/* Disable Nagle algorithm?		*/
    atomic_t		refcnt;		/* Reference count	*/

    /*
     * XXXXXXX above here MUST match with tcp_tw_bucket! (KAF)
     */

    socket_lock_t	 lock;	/* Synchronizer...*/
    struct sock *lprev, *lnext; /* Next sock on parent listener's conn queue */


    int			rcvbuf;		/* Size of receive buffer in bytes */
    
    wait_queue_head_t	*sleep;		/* Sock wait queue		   */
    struct dst_entry	*dst_cache;	/* Destination cache		   */
    rwlock_t		dst_lock;
    atomic_t		rmem_alloc;	/* Receive queue bytes committed   */
    struct sk_buff_head	receive_queue;	/* Incoming packets		   */
    atomic_t		wmem_alloc;	/* Transmit queue bytes committed  */
    struct sk_buff_head	write_queue;	/* Packet sending queue		   */
    atomic_t		omem_alloc;	/* "o" is "option" or "other" */
    __u32			saddr;		/* Sending source	   */
    unsigned int		allocation;	/* Allocation mode	   */
    int			sndbuf;		/* Size of send buffer in bytes	   */
    struct sock		*prev;
    
    volatile char		dead,
        done,
        urginline,
        keepopen,
        destroy;
    unsigned char		debug;
    unsigned char           no_check; /* UDP checksum */
    int			proc;
    
    int			hashent;

    /* The backlog queue is special, it is always used with
     * the per-socket spinlock held and requires low latency
     * access.  Therefore we special case it's implementation.
     */
    struct {
        struct sk_buff *head;
        struct sk_buff *tail;
    } backlog;

    rwlock_t		callback_lock;

    /* Error queue, rarely used. */
    struct sk_buff_head	error_queue;

    struct proto		*prot;	/* transport operations (see struct proto) */
    
    unsigned short		shutdown;	/* RCV_SHUTDOWN / SEND_SHUTDOWN bits */
    
    union {
        struct tcp_opt af_tcp;
        struct raw_opt tp_raw4;
    } tp_pinfo;	/* transport-protocol private state */

    int			err, err_soft;	/* Soft holds errors that don't
                                           cause failure but are the cause
                                           of a persistent failure not just
                                           'timed out' */
    unsigned short		ack_backlog;
    unsigned short		max_ack_backlog;
    __u32			priority;
    unsigned short          type;	/* SOCK_STREAM etc. */
    unsigned char		localroute;	/* Route locally only */
    unsigned char           protocol;


    
    union {
        void *destruct_hook;
        struct inet_opt af_inet;
    } protinfo;  		/* network-protocol private state */

    spinlock_t              timer_lock;
    struct timer_list	timer;		/* This is the sock cleanup timer. */
    /* Identd */
    struct socket		*socket;	/* back-pointer to the BSD socket */
    struct user_pcb             *pcb;	/* user-space NIC binding (see detach_sock_from_nic) */

    /* Callbacks */
    void (*state_change)(struct sock *sk);
    void (*data_ready)(struct sock *sk,int bytes);
    void (*write_space)(struct sock *sk);
    int  (*backlog_rcv)(struct sock *sk, struct sk_buff *skb);
    void (*error_report)(struct sock *sk);
    void (*destruct)(struct sock *sk);
}; // end of struct sock

/* The per-socket spinlock must be held here. */
/* Append __skb to __sk's backlog FIFO.  The chain is threaded through
 * skb->next and NULL-terminated; caller must hold the per-socket lock
 * (see comment above). */
#define sk_add_backlog(__sk, __skb)			\
do {	if((__sk)->backlog.tail == NULL) {		\
		(__sk)->backlog.head =			\
		     (__sk)->backlog.tail = (__skb);	\
	} else {					\
		((__sk)->backlog.tail)->next = (__skb);	\
		(__sk)->backlog.tail = (__skb);		\
	}						\
	(__skb)->next = NULL;				\
} while(0)

/* IP protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
    void			(*close)(struct sock *sk, 
					 long timeout);
    int			(*connect)(struct sock *sk,
				   struct sockaddr *uaddr, 
				   int addr_len);
    int			(*disconnect)(struct sock *sk, int flags);

    struct sock *		(*accept) (struct sock *sk, int flags, int *err);
    void			(*retransmit)(struct sock *sk, int all);
    void			(*write_wakeup)(struct sock *sk);
    void			(*read_wakeup)(struct sock *sk);

    unsigned int		(*poll)(struct file * file, struct socket *sock,
					struct poll_table_struct *wait);

    int			(*ioctl)(struct sock *sk, int cmd,
				 unsigned long arg);
    int			(*init)(struct sock *sk);
    int			(*destroy)(struct sock *sk);
    void			(*shutdown)(struct sock *sk, int how);
    int			(*setsockopt)(struct sock *sk, int level, 
				      int optname, char *optval, int optlen);
    int			(*getsockopt)(struct sock *sk, int level, 
				      int optname, char *optval, 
				      int *option);  	 
    int			(*sendmsg)(struct sock *sk, struct msghdr *msg,
				   int len);
    int			(*recvmsg)(struct sock *sk, struct msghdr *msg,
				   int len, int noblock, int flags, 
				   int *addr_len);
    int			(*bind)(struct sock *sk, 
				struct sockaddr *uaddr, int addr_len);

    int			(*backlog_rcv) (struct sock *sk, 
					struct sk_buff *skb);

    void			(*hash)(struct sock *sk);
    void			(*unhash)(struct sock *sk);
    int			(*get_port)(struct sock *sk, unsigned short snum);

    unsigned short		max_header;
    unsigned long		retransmits;
    char			name[32];
    int			inuse, highestinuse;
};

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * XXX KAF: changed quite a bit, and probably less efficient at the moment :-(
 */
extern void __release_sock(struct sock *sk);

/*
 * lock_sock -- take the socket's pth mutex and count ourselves as a
 * user.  lock.users is how the rest of the stack tells that a process
 * currently owns the socket (see release_sock / sock_sleep below).
 */
#define lock_sock(__sk) \
do {	pth_mutex_acquire(&((__sk)->lock.m),0,NULL); \
	(__sk)->lock.users++; \
} while(0)

/*
 * sock_lock_init -- initialise the per-socket lock: no users yet,
 * fresh pth mutex.
 *
 * Fix: the macro used to end in "while(0);".  The trailing semicolon
 * defeats the whole point of the do/while(0) idiom -- it injected an
 * extra empty statement at every call site and made
 * `if (x) sock_lock_init(sk); else ...' a syntax error.
 */
#define sock_lock_init(__sk) \
do {	(__sk)->lock.users = 0; \
        pth_mutex_init(&((__sk)->lock.m)); \
} while(0)

/*
 * release_sock -- drop our user count and, if the bottom half queued
 * packets on the backlog while we held the lock, process them via
 * __release_sock() before letting go of the mutex.
 */
#define release_sock(__sk) \
do {	(__sk)->lock.users--; \
	if ((__sk)->backlog.tail != NULL) __release_sock(__sk); \
        pth_mutex_release(&((__sk)->lock.m)); \
} while(0)

/*
 * XXX KAF: rather poor! We could do with two locks, really. The top half
 * would then lock the 2nd mutex before looking at the backlog queue.
 * 
 * This should be quite easy to add later -- let's see how things run!
 * 
 * 10/12/99: Okay, I did this wrong. We must distinguish between the BH
 * thread and others, otherwise no TH thread will ever get segments to
 * process! The BH will simply process them all when it releases... :-(
 * Solution: 
 *  1. Bottom half should not look at backlog at all when releasing.
 *  2. But what if no other threads waiting to process a backlog? Other
 *     sections of the stack check the 'users' field, so the BH should not
 *     increment it, or it will never be zero!
 */
/* Bottom-half variants: take/release the mutex WITHOUT touching
 * lock.users -- per the 10/12/99 note above, the BH must not count as
 * a user or 'users' would never drop to zero for other threads. */
#define bh_lock_sock(__sk) pth_mutex_acquire(&((__sk)->lock.m),0,NULL)
#define bh_unlock_sock(__sk) pth_mutex_release(&((__sk)->lock.m))

/*
 * XXX KAF (11/12/99): further changes -- the kernel stack does a release/lock
 * around every wait, thus eliminating that thread from the 'user' count.
 * We don't want to release/lock, as that is done safely for us by
 * pth_cond_await()... we simply want to decrement the count, then re-inc
 * it. The following macro does the job :-)
 */
/*
 * sock_sleep -- block on the socket's condition variable.  The mutex
 * release/re-acquire is done atomically by pth_cond_await() itself;
 * we only bracket the wait by dropping and re-taking our slot in
 * lock.users, so a sleeping thread is not counted as an owner
 * (see the 11/12/99 note above).
 */
#define sock_sleep(__sk) \
({ \
    (__sk)->lock.users--; \
    pth_cond_await((__sk)->sleep, &((__sk)->lock.m), NULL); \
    (__sk)->lock.users++; \
})

/*static __inline__ int min(unsigned int a, unsigned int b){ 
   if ( a > b ) a = b; 
    return a;
}

static __inline__ int max(unsigned int a, unsigned int b)
{
    if ( a < b ) a = b;
    return a;
}*/

struct socket *create_new_socket(int protocol);
void free_socket(struct socket *sock);
int bind_and_connect_new_socket(struct socket *sock,
                                u32 saddr,
                                u16 sport,
                                u32 daddr,
                                u16 dport);

extern void sock_init_data(struct socket *sock, struct sock *sk);
extern struct sock *		sk_alloc(int family, int priority, int zero_it);
extern void			sk_free(struct sock *sk);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      int priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      int priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);
extern void			sock_cfree(struct sk_buff *skb);
extern unsigned long		sock_rspace(struct sock *sk);
extern unsigned long		sock_wspace(struct sock *sk);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char *optval,
						int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char *optval, 
						int *optlen);
extern struct sk_buff 		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     unsigned long fallback,
						     int noblock,
						     int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size, int priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);


/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int                      sock_no_release(struct socket *);
extern int                      sock_no_bind(struct socket *, 
					     struct sockaddr *, int);
extern int                      sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int                      sock_no_socketpair(struct socket *,
						   struct socket *);
extern int                      sock_no_accept(struct socket *,
					       struct socket *, int);
extern int                      sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int             sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int                      sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int                      sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int , int,
						   char *, int *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char *, int);
extern int 			sock_no_fcntl(struct socket *, 
					      unsigned int, unsigned long);
extern int                      sock_no_sendmsg(struct socket *,
						struct msghdr *, int,
						struct scm_cookie *);
extern int                      sock_no_recvmsg(struct socket *,
						struct msghdr *, int,
						struct scm_cookie *);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);

/*
 *	Default socket callbacks and setup code
 */
 
extern void sock_def_callback1(struct sock *);
extern void sock_def_callback2(struct sock *, int);
extern void sock_def_callback3(struct sock *);
extern void sock_def_destruct(struct sock *);

extern void sklist_remove_socket(struct sock **list, struct sock *sk);
extern void sklist_insert_socket(struct sock **list, struct sock *sk);
extern void sklist_destroy_socket(struct sock **list, struct sock *sk);

/*
 * Socket reference counting postulates.
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (an hash table bucket, reference from a list,
 *   running timer, skb in flight MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and current process on current CPU
 *   is last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   to the time when it is called, socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab reference count,
 *   when they sit in queue. Otherwise, packets will leak to hole, when
 *   socket is looked up by one cpu and unhasing is made by another CPU.
 *   It is true for udp/raw, netlink (leak to receive and error queues), tcp
 *   (leak to backlog). Packet socket does all the processing inside
 *   ptype_lock, so that it has not this race condition. UNIX sockets
 *   use separate SMP lock, so that they are prone too.
 */

/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed f.e. it is found in hash table
   or a list and the lookup is made under lock preventing hash table
   modifications.  Pairs with sock_put()/__sock_put().
 */

extern __inline__ void sock_hold(struct sock *sk)
{
    atomic_inc(&sk->refcnt);
}

/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, f.e. it is true in context of any socketcall.
   Unlike sock_put() below, this never frees the socket.
 */
extern __inline__ void __sock_put(struct sock *sk)
{
    atomic_dec(&sk->refcnt);
}

/* Ungrab socket and destroy it, if it was the last reference.
   sk_free() may be called from any context (see the refcounting
   postulates above). */
extern __inline__ void sock_put(struct sock *sk)
{
    if (atomic_dec_and_test(&sk->refcnt))
	sk_free(sk);
}

/* Peek at the cached route without locking and without taking a
 * reference; only safe when the caller holds sk->dst_lock or
 * otherwise owns the socket. */
extern __inline__ struct dst_entry *
__sk_dst_get(struct sock *sk)
{
    return sk->dst_cache;
}

/* Fetch the cached route under dst_lock with its refcount bumped
 * (dst_hold); may return NULL.  The caller owns the returned
 * reference and must dst_release() it when done. */
extern __inline__ struct dst_entry *
sk_dst_get(struct sock *sk)
{
    struct dst_entry *dst;

    read_lock(&sk->dst_lock);
    dst = sk->dst_cache;
    if (dst)
	dst_hold(dst);
    read_unlock(&sk->dst_lock);
    return dst;
}

/* Install a new cached route, consuming the caller's reference to
 * 'dst', and drop the reference held on whatever was cached before.
 * Lockless variant: caller holds sk->dst_lock (see sk_dst_set). */
extern __inline__ void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
    struct dst_entry *prev;

    prev = sk->dst_cache;
    sk->dst_cache = dst;
    dst_release(prev);
}

/* Locked version of __sk_dst_set(): installs dst (consuming the
 * caller's reference) under the write side of dst_lock. */
extern __inline__ void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
    write_lock(&sk->dst_lock);
    __sk_dst_set(sk, dst);
    write_unlock(&sk->dst_lock);
}



/* Forget the cached route and drop the cache's reference to it.
 * Lockless variant: caller holds sk->dst_lock (see sk_dst_reset). */
extern __inline__ void
__sk_dst_reset(struct sock *sk)
{
    struct dst_entry *old_dst;

    old_dst = sk->dst_cache;
    sk->dst_cache = NULL;
    dst_release(old_dst);
}

/* Locked version of __sk_dst_reset(). */
extern __inline__ void
sk_dst_reset(struct sock *sk)
{
    write_lock(&sk->dst_lock);
    __sk_dst_reset(sk);
    write_unlock(&sk->dst_lock);
}

/* Validate the cached route against 'cookie'.  If the route has gone
 * obsolete and fails its ops->check(), forget it and return NULL;
 * otherwise return it (no new reference taken -- lockless variant).
 *
 * Fix: the invalidated dst was only unhooked from the cache, never
 * dst_release()d, leaking the reference the cache held (__sk_dst_set
 * shows the cache owns one reference per cached dst).
 */
extern __inline__ struct dst_entry *
__sk_dst_check(struct sock *sk, u32 cookie)
{
    struct dst_entry *dst = sk->dst_cache;

    if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
	sk->dst_cache = NULL;
	dst_release(dst);	/* drop the cache's reference */
	return NULL;
    }

    return dst;
}

/* Locked route validation: fetch the cached route with a reference
 * (sk_dst_get), and if it is obsolete and fails ops->check(), clear
 * the cache and return NULL.  On success the caller owns the returned
 * reference and must dst_release() it.
 *
 * Fix: on the obsolete path the reference taken by sk_dst_get()
 * (via dst_hold) was never dropped -- sk_dst_reset() only releases
 * the cache's own reference -- so every invalidation leaked one ref.
 */
extern __inline__ struct dst_entry *
sk_dst_check(struct sock *sk, u32 cookie)
{
    struct dst_entry *dst = sk_dst_get(sk);

    if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
	sk_dst_reset(sk);
	dst_release(dst);	/* drop the reference sk_dst_get() took */
	return NULL;
    }

    return dst;
}


/*
 * 	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 * 	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */

/* Make sk the owner of skb for WRITE (send) accounting: take a socket
 * reference, install sock_wfree (which uncharges on kfree_skb), and
 * charge skb->truesize to wmem_alloc -- but only when the skb data has
 * a single reference (presumably to avoid double-charging shared
 * data -- TODO confirm against the kernel original). */
extern __inline__ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
    sock_hold(sk);
    skb->sk = sk;
    skb->destructor = sock_wfree;
    if ( atomic_read(skb_datarefp(skb)) == 1 )
    {
	atomic_add(skb->truesize, &sk->wmem_alloc);
    }
}

/* Make sk the owner of skb for READ (receive) accounting: install
 * sock_rfree and charge skb->truesize to rmem_alloc.  Note: unlike
 * the _w variant, no socket reference is taken here. */
extern __inline__ void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
    skb->sk = sk;
    skb->destructor = sock_rfree;
    atomic_add(skb->truesize, &sk->rmem_alloc);
}

/* Associate skb with sk and install sock_cfree as destructor; takes a
 * socket reference but charges no buffer space to the socket. */
extern __inline__ void skb_set_owner_c(struct sk_buff *skb, struct sock *sk)
{
    sock_hold(sk);
    skb->sk = sk;
    skb->destructor = sock_cfree;
}


extern __inline__ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
    /* Queue a received skb if it fits within sk->rcvbuf: charge it to
       the socket, append to receive_queue and wake the reader via
       data_ready.  Returns -ENOMEM if the receive budget is exhausted.
       (Cast sk->rcvbuf to unsigned... It's pointless, but reduces
       number of warnings when compiling with -W --ANK)
    */
    if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
	return -ENOMEM;

    skb_set_owner_r(skb, sk);
    skb_queue_tail(&sk->receive_queue, skb);
    if (!sk->dead)
	sk->data_ready(sk,skb->len);
    return 0;
}

extern __inline__ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
    /* Same as sock_queue_rcv_skb() but targets the error queue; the
       skb still counts against sk->rcvbuf and still signals the
       reader through data_ready.
       (Cast sk->rcvbuf to unsigned... It's pointless, but reduces
       number of warnings when compiling with -W --ANK)
    */
    if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
	return -ENOMEM;
    skb_set_owner_r(skb, sk);
    skb_queue_tail(&sk->error_queue,skb);
    if (!sk->dead)
	sk->data_ready(sk,skb->len);
    return 0;
}

/*
 *	Recover an error report and clear atomically
 */
 
extern __inline__ int sock_error(struct sock *sk)
{
    /* xchg() atomically reads and zeroes sk->err; negate so callers
       get a ready-to-return -Exxx value (0 when no error pending). */
    int err=xchg(&sk->err,0);
    return -err;
}

/*
 * Bytes of send-buffer space still available on this socket.
 * Returns 0 once the send side has been shut down, and is clamped so
 * it never goes negative even if wmem_alloc overshoots sndbuf.
 */
extern __inline__ unsigned long sock_wspace(struct sock *sk)
{
    int space;

    if (sk->shutdown & SEND_SHUTDOWN)
	return 0;

    space = sk->sndbuf - atomic_read(&sk->wmem_alloc);
    return (space < 0) ? 0 : space;
}

#define MIN_WRITE_SPACE 2048
/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 *	Kernel internally doesn't use the MIN_WRITE_SPACE threshold.
 */
extern __inline__ int sock_writeable(struct sock *sk) 
{
    /* Writable once at least MIN_WRITE_SPACE bytes of send buffer
       remain (sock_wspace already returns 0 after send shutdown). */
    return sock_wspace(sk) >= MIN_WRITE_SPACE;
}


/* 
 *	Enable debug/info messages 
 */

/* NETDEBUG(x): compiles the statement x in, unless NDEBUG is defined,
 * in which case the whole thing expands to a no-op. */
#ifdef NDEBUG
#define NETDEBUG(x)	do { } while (0)
#else
#define NETDEBUG(x)	do { x; } while (0)
#endif
/*****************************************************************************/


/*****************************************************************************/
/*** XXX KAF -- from linux/poll.h */
/* These are specified by iBCS2 */
#define POLLIN		0x0001
#define POLLPRI		0x0002
#define POLLOUT		0x0004
#define POLLERR		0x0008
#define POLLHUP		0x0010
#define POLLNVAL	0x0020

/* The rest seem to be more-or-less nonstandard. Check them! */
#define POLLRDNORM	0x0040
#define POLLRDBAND	0x0080
#define POLLWRNORM	0x0100
#define POLLWRBAND	0x0200
#define POLLMSG		0x0400

typedef struct poll_table_struct {} poll_table;	/* empty stub -- no poll support */

extern void __pollwait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p);

/* In the kernel this registers the caller on a wait queue via
 * __pollwait(); here the body is compiled out (#if 0), so poll_wait()
 * is a no-op in user space. */
extern inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
{
#if 0
    if (p && wait_address)
	XXX	__pollwait(filp, wait_address, p);
#endif
}
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from linux/in.h */
/* IP protocol numbers (subset this stack knows about). */
enum {
    IPPROTO_IP   = 0,           /* Dummy protocol for TCP               */
    IPPROTO_ICMP = 1,           /* Internet Control Message Protocol    */
    IPPROTO_TCP  = 6,           /* Transmission Control Protocol	*/
    IPPROTO_UDP  = 17,          /* User Datagram Protocol		*/
    IPPROTO_RAW  = 255,         /* Raw IP packets                       */
    IPPROTO_MAX
};
 
/* Internet address. */
struct in_addr {
    __u32	s_addr;
};

#define IP_TOS		1
#define IP_TTL		2
#define IP_HDRINCL	3
#define IP_OPTIONS	4
#define IP_ROUTER_ALERT	5
#define IP_RECVOPTS	6
#define IP_RETOPTS	7
#define IP_PKTINFO	8
#define IP_PKTOPTIONS	9
#define IP_MTU_DISCOVER	10
#define IP_RECVERR	11
#define IP_RECVTTL	12
#define	IP_RECVTOS	13
#define IP_MTU		14

/* BSD compatibility */
#define IP_RECVRETOPTS	IP_RETOPTS
 
/* IP_MTU_DISCOVER values */
#define IP_PMTUDISC_DONT		0	/* Never send DF frames */
#define IP_PMTUDISC_WANT		1	/* Use per route hints	*/
#define IP_PMTUDISC_DO			2	/* Always DF		*/

#define IP_MULTICAST_IF			32
#define IP_MULTICAST_TTL 		33
#define IP_MULTICAST_LOOP 		34
#define IP_ADD_MEMBERSHIP		35
#define IP_DROP_MEMBERSHIP		36

/* These need to appear somewhere around here */
#define IP_DEFAULT_MULTICAST_TTL        1
#define IP_DEFAULT_MULTICAST_LOOP       1

/* Request struct for multicast socket ops */

struct ip_mreq 
{
    struct in_addr imr_multiaddr;	/* IP multicast address of group */
    struct in_addr imr_interface;	/* local IP address of interface */
};

/* Like ip_mreq, but the local interface may also be named by index. */
struct ip_mreqn
{
    struct in_addr	imr_multiaddr;	/* IP multicast address of group */
    struct in_addr	imr_address;	/* local IP address of interface */
    int		imr_ifindex;	/* Interface index */
};

/* Packet info, used with the IP_PKTINFO option (see above). */
struct in_pktinfo
{
    int		ipi_ifindex;
    struct in_addr	ipi_spec_dst;
    struct in_addr	ipi_addr;
};
 
/* Structure describing an Internet (IP) socket address. */
#define __SOCK_SIZE__	16		/* sizeof(struct sockaddr)	*/
struct sockaddr_in {
    sa_family_t		sin_family;	/* Address family		*/
    unsigned short int	sin_port;	/* Port number			*/
    struct in_addr	sin_addr;	/* Internet address		*/

    /* Pad to size of `struct sockaddr'.
       NOTE(review): the arithmetic assumes sizeof(sa_family_t) ==
       sizeof(short int); confirm on this platform or the struct will
       not be exactly __SOCK_SIZE__ bytes. */
    unsigned char		__pad[__SOCK_SIZE__ - sizeof(short int) -
				      sizeof(unsigned short int) - sizeof(struct in_addr)];
};

/*
 * Definitions of the bits in an Internet address integer.
 * On subnets, host and network parts are found according
 * to the subnet mask, not these masks.
 */
#define	IN_CLASSA(a)		((((long int) (a)) & 0x80000000) == 0)
#define	IN_CLASSA_NET		0xff000000
#define	IN_CLASSA_NSHIFT	24
#define	IN_CLASSA_HOST		(0xffffffff & ~IN_CLASSA_NET)
#define	IN_CLASSA_MAX		128

#define	IN_CLASSB(a)		((((long int) (a)) & 0xc0000000) == 0x80000000)
#define	IN_CLASSB_NET		0xffff0000
#define	IN_CLASSB_NSHIFT	16
#define	IN_CLASSB_HOST		(0xffffffff & ~IN_CLASSB_NET)
#define	IN_CLASSB_MAX		65536

#define	IN_CLASSC(a)		((((long int) (a)) & 0xe0000000) == 0xc0000000)
#define	IN_CLASSC_NET		0xffffff00
#define	IN_CLASSC_NSHIFT	8
#define	IN_CLASSC_HOST		(0xffffffff & ~IN_CLASSC_NET)

#define	IN_CLASSD(a)		((((long int) (a)) & 0xf0000000) == 0xe0000000)
#define	IN_MULTICAST(a)		IN_CLASSD(a)
#define IN_MULTICAST_NET	0xF0000000

#define	IN_EXPERIMENTAL(a)	((((long int) (a)) & 0xf0000000) == 0xf0000000)
#define	IN_BADCLASS(a)		IN_EXPERIMENTAL((a))

/* Address to accept any incoming messages. */
#define	INADDR_ANY		((unsigned long int) 0x00000000)

/* Address to send to all hosts. */
#define	INADDR_BROADCAST	((unsigned long int) 0xffffffff)

/* Address indicating an error return. */
#define	INADDR_NONE		((unsigned long int) 0xffffffff)

/* Network number for local host loopback. */
#define	IN_LOOPBACKNET		127

/* Address to loopback in software to local host.  */
#define	INADDR_LOOPBACK		0x7f000001	/* 127.0.0.1   */
#define	IN_LOOPBACK(a)		((((long int) (a)) & 0xff000000) == 0x7f000000)

/* Defines for Multicast INADDR */
#define INADDR_UNSPEC_GROUP   	0xe0000000U	/* 224.0.0.0   */
#define INADDR_ALLHOSTS_GROUP 	0xe0000001U	/* 224.0.0.1   */
#define INADDR_ALLRTRS_GROUP    0xe0000002U	/* 224.0.0.2 */
#define INADDR_MAX_LOCAL_GROUP  0xe00000ffU	/* 224.0.0.255 */

#define LOOPBACK(x)	(((x) & htonl(0xff000000)) == htonl(0x7f000000))
#define MULTICAST(x)	(((x) & htonl(0xf0000000)) == htonl(0xe0000000))
#define BADCLASS(x)	(((x) & htonl(0xf0000000)) == htonl(0xf0000000))
#define ZERONET(x)	(((x) & htonl(0xff000000)) == htonl(0x00000000))
#define LOCAL_MCAST(x)	(((x) & htonl(0xFFFFFF00)) == htonl(0xE0000000))
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from linux/rtnetlink.h */
/* Route types, as used by the routing code below (rt->rt_type). */
enum
{
    RTN_UNSPEC,
    RTN_UNICAST,		/* Gateway or direct route	*/
    RTN_LOCAL,		/* Accept locally		*/
    RTN_BROADCAST,		/* Accept locally as broadcast,
				   send as broadcast */
    RTN_ANYCAST,		/* Accept locally as broadcast,
				   but send as unicast */
    RTN_MULTICAST,		/* Multicast route		*/
    RTN_BLACKHOLE,		/* Drop				*/
    RTN_UNREACHABLE,	/* Destination is unreachable   */
    RTN_PROHIBIT,		/* Administratively prohibited	*/
    RTN_THROW,		/* Not in this table		*/
    RTN_NAT,		/* Translate this address	*/
    RTN_XRESOLVE,		/* Use external resolver	*/
};
#define RTN_MAX RTN_XRESOLVE
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from linux/netdevice.h */
#define NET_XMIT_SUCCESS	0
#define NET_XMIT_DROP		1	/* skb dropped			*/
#define NET_XMIT_CN		2	/* congestion notification	*/
#define NET_XMIT_POLICED	3	/* skb is shot by police	*/

#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Cached, refcounted hardware (link-layer) header shared between
 * routes to the same neighbour. */
struct hh_cache
{
    struct hh_cache *hh_next;	/* Next entry			     */
    atomic_t	hh_refcnt;	/* number of users                   */
    unsigned short  hh_type;	/* protocol identifier, f.e ETH_P_IP */
    int		hh_len;		/* length of header */
    int		(*hh_output)(struct sk_buff *skb);
    rwlock_t	hh_lock;
    /* cached hardware header; allow for machine alignment needs.        */
    unsigned long	hh_data[16/sizeof(unsigned long)];
};

#define MAX_IFNAME_LEN 7
/*
 * Cut-down user-space stand-in for the kernel's struct net_device:
 * just the fields this stack touches, plus two user-space additions
 * at the bottom.
 */
struct net_device
{
    char name[MAX_IFNAME_LEN+1];	/* NUL-terminated interface name */
    struct net_device *next;

    int	  ifindex;
    u_int mtu;               /* interface MTU value */
    u_short hard_header_len; /* hardware header length */
    u_char dev_addr[6];      /* link-layer address */
    u_char addr_len;         /* valid bytes in dev_addr */
    // XXX only used in a test for NULL -- might be able to fake
    int			(*hard_header) (struct sk_buff *skb,
                                        struct net_device *dev,
                                        unsigned short type,
                                        void *daddr,
                                        void *saddr,
                                        unsigned len);

    /*
     * NEW FIELDS FOLLOW...
     */
    u32   ip_addr;           /* layer violation! never mind :) */
    struct rtable *rt_list;  /* per-device route list (user-space addition) */
};

/* Registered L3 receive handler, keyed by ethertype. */
struct packet_type 
{
    unsigned short		type;	/* This is really htons(ether_type).	*/
    struct net_device		*dev;	/* NULL is wildcarded here		*/
    int			(*func) (struct sk_buff *, struct net_device *,
				 struct packet_type *);
    void			*data;	/* Private to the packet type		*/
    struct packet_type	*next;
};

/* Unsupported in this user-space port: calling either aborts via panic(). */
#define dev_put(dev) panic("dev_put: not supported!")
#define __dev_get_by_index(ind) \
(panic("__dev_get_by_index: not supported!"),(void*)0)
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from linux/ip.h and net/ip.h */
/* SOL_IP socket options */

#define IPTOS_TOS_MASK		0x1E
#define IPTOS_TOS(tos)		((tos)&IPTOS_TOS_MASK)
#define	IPTOS_LOWDELAY		0x10
#define	IPTOS_THROUGHPUT	0x08
#define	IPTOS_RELIABILITY	0x04
#define	IPTOS_MINCOST		0x02

#define IPTOS_PREC_MASK		0xE0
#define IPTOS_PREC(tos)		((tos)&IPTOS_PREC_MASK)
#define IPTOS_PREC_NETCONTROL           0xe0
#define IPTOS_PREC_INTERNETCONTROL      0xc0
#define IPTOS_PREC_CRITIC_ECP           0xa0
#define IPTOS_PREC_FLASHOVERRIDE        0x80
#define IPTOS_PREC_FLASH                0x60
#define IPTOS_PREC_IMMEDIATE            0x40
#define IPTOS_PREC_PRIORITY             0x20
#define IPTOS_PREC_ROUTINE              0x00


/* IP options */
#define IPOPT_COPY		0x80
#define IPOPT_CLASS_MASK	0x60
#define IPOPT_NUMBER_MASK	0x1f

#define	IPOPT_COPIED(o)		((o)&IPOPT_COPY)
#define	IPOPT_CLASS(o)		((o)&IPOPT_CLASS_MASK)
#define	IPOPT_NUMBER(o)		((o)&IPOPT_NUMBER_MASK)

#define	IPOPT_CONTROL		0x00
#define	IPOPT_RESERVED1		0x20
#define	IPOPT_MEASUREMENT	0x40
#define	IPOPT_RESERVED2		0x60

#define IPOPT_END	(0 |IPOPT_CONTROL)
#define IPOPT_NOOP	(1 |IPOPT_CONTROL)
#define IPOPT_SEC	(2 |IPOPT_CONTROL|IPOPT_COPY)
#define IPOPT_LSRR	(3 |IPOPT_CONTROL|IPOPT_COPY)
#define IPOPT_TIMESTAMP	(4 |IPOPT_MEASUREMENT)
#define IPOPT_RR	(7 |IPOPT_CONTROL)
#define IPOPT_SID	(8 |IPOPT_CONTROL|IPOPT_COPY)
#define IPOPT_SSRR	(9 |IPOPT_CONTROL|IPOPT_COPY)
#define IPOPT_RA	(20|IPOPT_CONTROL|IPOPT_COPY)

#define IPVERSION	4
#define MAXTTL		255
#define IPDEFTTL	64

/* struct timestamp, struct route and MAX_ROUTES are removed.

   REASONS: it is clear that nobody used them because:
   - MAX_ROUTES value was wrong.
   - "struct route" was wrong.
   - "struct timestamp" had fatally misaligned bitfields and was completely unusable.
 */

#define IPOPT_OPTVAL 0
#define IPOPT_OLEN   1
#define IPOPT_OFFSET 2
#define IPOPT_MINOFF 4
#define MAX_IPOPTLEN 40
#define IPOPT_NOP IPOPT_NOOP
#define IPOPT_EOL IPOPT_END
#define IPOPT_TS  IPOPT_TIMESTAMP

#define	IPOPT_TS_TSONLY		0		/* timestamps only */
#define	IPOPT_TS_TSANDADDR	1		/* timestamps and addresses */
#define	IPOPT_TS_PRESPEC	3		/* specified modules only */

/* Compiled IPv4 options attached to a socket or packet; the raw
 * option bytes live in the trailing __data[] when is_data is set. */
struct ip_options {
    __u32		faddr;		/* Saved first hop address */
    unsigned char	optlen;
    unsigned char	srr;
    unsigned char	rr;
    unsigned char	ts;
    unsigned char	is_setbyuser:1,	/* Set by setsockopt?			*/
			is_data:1,	/* Options in __data, rather than skb	*/
			is_strictroute:1, /* Strict source route		*/
			srr_is_hit:1,	/* Packet destination addr was our one	*/
			is_changed:1,	/* IP checksum no longer valid		*/
			rr_needaddr:1,	/* Need to record addr of outgoing dev	*/
			ts_needtime:1,	/* Need to record timestamp		*/
			ts_needaddr:1;	/* Need to record addr of outgoing dev	*/
    unsigned char	router_alert;
    unsigned char	__pad1;
    unsigned char	__pad2;
    unsigned char	__data[0];
};

#define optlength(opt) (sizeof(struct ip_options) + opt->optlen)

/* On-the-wire IPv4 header; bitfield order for version/ihl depends on
 * host endianness.  Multi-byte fields stay in network byte order
 * (see e.g. ip_decrease_ttl's ntohs on 'check'). */
struct iphdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
    __u8	ihl:4,
		    version:4;
#elif defined (__BIG_ENDIAN_BITFIELD)
    __u8	version:4,
			ihl:4;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
    __u8	tos;
    __u16	tot_len;
    __u16	id;
    __u16	frag_off;
    __u8	ttl;
    __u8	protocol;
    __u16	check;
    __u32	saddr;
    __u32	daddr;
    /*The options start here. */
}; 

/********** net/ip.h from here... */

/* Per-packet IP state stashed in skb->cb -- accessed via IPCB(). */
struct inet_skb_parm
{
    struct ip_options	opt;		/* Compiled IP options	*/
    u16			redirport;	/* Redirect port	*/
    unsigned char		flags;

#define IPSKB_MASQUERADED	1
#define IPSKB_TRANSLATED	2
#define IPSKB_FORWARDED		4
};

/* Per-send control info: destination, outgoing interface, options. */
struct ipcm_cookie
{
    u32			addr;
    int			oif;
    struct ip_options	*opt;
};

/* Access the inet_skb_parm stored in an skb's control buffer. */
#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))

/* Chain of sockets registered for Router Alert packets
 * (managed by ip_ra_control, declared below). */
struct ip_ra_chain
{
    struct ip_ra_chain	*next;
    struct sock		*sk;
    void			(*destructor)(struct sock *);
};

extern struct ip_ra_chain *ip_ra_chain;
extern rwlock_t ip_ra_lock;

/* IP flags. */
#define IP_CE		0x8000		/* Flag: "Congestion"		*/
#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/

#define IP_FRAG_TIME	(30 * HZ)		/* fragment lifetime	*/

/*
 *	Functions provided by ip.c
 */
extern void		ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
					      u32 saddr, u32 daddr,
					      struct ip_options *opt);
extern int		ip_rcv(struct sk_buff *skb, struct net_device *dev,
			       struct packet_type *pt);
extern int		ip_local_deliver(struct sk_buff *skb);
extern int		ip_mr_input(struct sk_buff *skb);
extern int		ip_output(struct sk_buff *skb);
extern int		ip_mc_output(struct sk_buff *skb);
extern int		ip_fragment(struct sk_buff *skb, int (*out)(struct sk_buff*));
extern int		ip_do_nat(struct sk_buff *skb);
extern void		ip_send_check(struct iphdr *ip);
extern int		ip_id_count;			  
extern int		ip_queue_xmit(struct sk_buff *skb);
extern void		ip_init(void);
extern int		ip_build_xmit(struct sock *sk,
				      int getfrag (const void *,
						   char *,
						   unsigned int,
						   unsigned int),
				      const void *frag,
				      unsigned length,
				      struct ipcm_cookie *ipc,
				      struct rtable *rt,
				      int flags);

/* Arguments for ip_send_reply(): reply payload iovecs plus partial
 * checksum bookkeeping. */
struct ip_reply_arg {
    struct iovec iov[2];   
    int          n_iov;    /* redundant */
    u32 	     csum;	/* partial checksum of the payload */
    int	     csumoffset; /* u16 offset of csum in iov[0].iov_base */
    /* -1 if not needed */ 
}; 

void ip_send_reply(/*XXXKAFstruct sock *sk*/int raw_sock, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len); 
extern __inline__ int ip_finish_output(struct sk_buff *skb);
extern int ip_queue_xmit(struct sk_buff *skb);
extern int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen);
extern int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen);

extern int sysctl_local_port_range[2];

/* Push a fully built IP packet to output, fragmenting first when it
 * exceeds the path MTU cached on the route. */
extern __inline__ int ip_send(struct sk_buff *skb)
{
    if (skb->len > skb->dst->pmtu)
	return ip_fragment(skb, ip_finish_output);
    else
	return ip_finish_output(skb);
}

/* Decrement the TTL and incrementally patch the header checksum
 * (RFC 1141-style: TTL sits in the high byte of its 16-bit word, so
 * TTL-1 means checksum+0x0100, folding the end-around carry).
 * Returns the new TTL so callers can drop the packet at zero. */
extern __inline__
int ip_decrease_ttl(struct iphdr *iph)
{
    u16 check = iph->check;
    check = ntohs(check) + 0x0100;
    if ((check & 0xFF00) == 0)
	check++;		/* carry overflow */
    iph->check = htons(check);
    return --iph->ttl;
}

/* Should packets from this socket carry the DF bit?  True when
 * path-MTU discovery is forced on (IP_PMTUDISC_DO), or merely wanted
 * (IP_PMTUDISC_WANT) and the route does not lock its MTU metric. */
extern __inline__
int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
{
    return (sk->protinfo.af_inet.pmtudisc == IP_PMTUDISC_DO ||
	    (sk->protinfo.af_inet.pmtudisc == IP_PMTUDISC_WANT &&
	     !(dst->mxlock&(1<<RTAX_MTU))));
}

/*
 *	Map a multicast IP onto multicast MAC for type ethernet.
 */
/*
 *	Map a multicast IP onto multicast MAC for type ethernet:
 *	fixed 01:00:5e prefix followed by the low 23 bits of the
 *	(host-order) group address.
 */
extern __inline__ void ip_eth_mc_map(u32 addr, char *buf)
{
    u32 group = ntohl(addr);

    buf[0] = 0x01;
    buf[1] = 0x00;
    buf[2] = 0x5e;
    buf[3] = (group >> 16) & 0x7F;
    buf[4] = (group >> 8) & 0xFF;
    buf[5] = group & 0xFF;
}

extern int	ip_call_ra_chain(struct sk_buff *skb);
/*
 *	Functions provided by ip_fragment.o
 */
 
struct sk_buff *ip_defrag(struct sk_buff *skb);

/*
 *	Functions provided by ip_forward.c
 */
 
extern int ip_forward(struct sk_buff *skb);
extern int ip_net_unreachable(struct sk_buff *skb);
 
/*
 *	Functions provided by ip_options.c
 */
 
extern void ip_options_build(struct sk_buff *skb, struct ip_options *opt, u32 daddr, struct rtable *rt, int is_frag);
extern int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb);
extern void ip_options_fragment(struct sk_buff *skb);
extern int ip_options_compile(struct ip_options *opt, struct sk_buff *skb);
extern int ip_options_get(struct ip_options **optp, unsigned char *data, int optlen, int user);
extern void ip_options_undo(struct ip_options * opt);
extern void ip_forward_options(struct sk_buff *skb);
extern int ip_options_rcv_srr(struct sk_buff *skb);

/*
 *	Functions provided by ip_sockglue.c
 */

extern void	ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
extern int	ip_cmsg_send(struct msghdr *msg, struct ipcm_cookie *ipc);
extern int	ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen);
extern int	ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen);
extern int	ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *));

extern int 	ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
extern void	ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, 
			      u16 port, u32 info, u8 *payload);
extern void	ip_local_error(struct sock *sk, int err, u32 daddr, u16 dport,
			       u32 info);
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from net/route.h */
// TC_PRIO_xxx from inux/pkt_sched.h
#define TC_PRIO_BESTEFFORT		0
#define TC_PRIO_FILLER			1
#define TC_PRIO_BULK			2
#define TC_PRIO_INTERACTIVE_BULK	4
#define TC_PRIO_INTERACTIVE		6
#define TC_PRIO_CONTROL			7
#define TC_PRIO_MAX			15

#define RT_HASH_DIVISOR	    	256

#define RTO_ONLINK	0x01
#define RTO_TPROXY	0x80000000

#define RTO_CONN	0

struct rtable
{
    union { struct dst_entry dst; } u;

    unsigned	rt_flags;
    unsigned	rt_type;

    __u32	rt_dst;	/* Path destination	*/
    __u32	rt_src;	/* Path source		*/
    int		rt_iif;

    __u32	rt_gateway; /* Path neighbour */

    struct rtable *next; /* KAF: NEW FIELD */
};
#define rt_spec_dst rt_dst
#define rt_oif rt_iif

/* Per-slot route accounting counters (o_* = outbound, i_* = inbound). */
struct ip_rt_acct
{
    __u32 	o_bytes;	/* bytes sent */
    __u32 	o_packets;	/* packets sent */
    __u32 	i_bytes;	/* bytes received */
    __u32 	i_packets;	/* packets received */
};

extern struct ip_rt_acct ip_rt_acct[256];
extern rwlock_t ip_rt_acct_lock;

extern void		ip_rt_init(void);
#define ip_route_output(a,b,c,d,e) ip_route_find(a,b,c,e)
#define ip_route_connect(a,b,c,d,e) ip_route_find(a,b,c,e)
extern int		ip_route_find(struct rtable **, u32 dst, u32 src, /*u32 tos, */int oif);
extern int		ip_route_input(struct sk_buff*, u32 dst, u32 src, u8 tos, struct net_device *devin);
/* PMTU updates are not supported in this user-space build; abort loudly.
 * (Fixed typo in the panic message: "implemeneted" -> "implemented".) */
#define ip_rt_update_pmtu(dst,mtu) panic("ip_rt_update_pmtu: not implemented")
//extern void		ip_rt_update_pmtu(struct dst_entry *dst, unsigned mtu);
extern unsigned		inet_addr_type(u32 addr);
//extern void		ip_rt_multicast_event(struct in_device *);
//extern int		ip_rt_ioctl(unsigned int cmd, void *arg);
#define ip_rt_get_source(addr,rt) \
({ printf("ip_rt_get_source called\n"); memcpy(addr, &(rt->rt_src), 4); })
//extern void		ip_rt_get_source(u8 *src, struct rtable *rt);
//extern int		ip_rt_dump(struct sk_buff *skb,  struct netlink_callback *cb);


extern __inline__ void ip_rt_put(struct rtable * rt)
{
    if ( rt ) dst_release(&rt->u.dst);
}
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from net/protocol.h */
#define MAX_INET_PROTOS	32		/* Must be a power of 2		*/

/* This is used to register protocols. */
/* Registration record for an inet transport protocol (added with
 * inet_add_protocol() below and chained into inet_protos[]). */
struct inet_protocol 
{
    int			(*handler)(struct sk_buff *skb, unsigned short len);	/* receive entry point */
    void			(*err_handler)(struct sk_buff *skb, unsigned char *dp, int len);	/* ICMP error callback */
    struct inet_protocol	*next;	/* hash-chain link */
    unsigned char		protocol;	/* IP protocol number */
    unsigned char		copy:1;	/* set when another handler shares this protocol number */
    void			*data;
    const char		*name;	/* protocol name, for diagnostics */
};

extern rwlock_t inet_protocol_lock;

extern struct inet_protocol *inet_protocol_base;
extern struct inet_protocol *inet_protos[MAX_INET_PROTOS];

extern void	inet_add_protocol(struct inet_protocol *prot);
extern int	inet_del_protocol(struct inet_protocol *prot);
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from linux/netfilter.h */
#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb)
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from linux/inetdevice.h */
/*
 * Per-device IPv4 configuration knobs (mirrors linux/inetdevice.h).
 * One global instance (ipv4_devconf below) plus one copy embedded in each
 * in_device; the IN_DEV_* macros below combine the two.
 */
struct ipv4_devconf
{
    int	accept_redirects;
    int	send_redirects;
    int	secure_redirects;
    int	shared_media;
    int	accept_source_route;	/* consulted by IN_DEV_SOURCE_ROUTE() */
    int	rp_filter;
    int	proxy_arp;
    int	bootp_relay;
    int	log_martians;		/* consulted by IN_DEV_LOG_MARTIANS() */
    int	forwarding;		/* consulted by IN_DEV_FORWARD() */
    int	mc_forwarding;
    int	tag;
    void	*sysctl;
};

extern struct ipv4_devconf ipv4_devconf;

/* IPv4 state attached to a network device (mirrors linux/inetdevice.h). */
struct in_device
{
    struct net_device		*dev;	/* back-pointer to owning device */
    atomic_t		refcnt;	/* manipulated by in_dev_hold()/__in_dev_put() below */
    rwlock_t		lock;
    int			dead;
    struct ip_mc_list	*mc_list;	/* IP multicast filter chain    */
    unsigned long		mr_v1_seen;
    struct neigh_parms	*arp_parms;
    struct ipv4_devconf	cnf;	/* per-device copy of the config knobs */
};

#define IN_DEV_FORWARD(in_dev)		((in_dev)->cnf.forwarding)
#define IN_DEV_SOURCE_ROUTE(in_dev)	(ipv4_devconf.accept_source_route && (in_dev)->cnf.accept_source_route)
#define IN_DEV_LOG_MARTIANS(in_dev)	(ipv4_devconf.log_martians || (in_dev)->cnf.log_martians)

#define ip_dev_find(addr) (panic("ip_dev_find: not supported!"),(void*)0)
//extern struct net_device 	*ip_dev_find(u32 addr);
extern int		inet_addr_onlink(struct in_device *in_dev, u32 a, u32 b);
//extern int		devinet_ioctl(unsigned int cmd, void *);
extern void		devinet_init(void);
extern struct in_device *inetdev_init(struct net_device *dev);
extern struct in_device	*inetdev_by_index(int);
extern u32		inet_select_addr(const struct net_device *dev, u32 dst, int scope);
extern void		inet_forward_change(void);

extern rwlock_t inetdev_lock;

#if 0
/* Look up dev's in_device and take a reference on it under inetdev_lock.
 * NOTE: this definition is inside '#if 0'; the panic() stub below is the
 * active implementation in this build. */
extern __inline__ struct in_device *
in_dev_get(const struct net_device *dev)
{
    struct in_device *in_dev;

    read_lock(&inetdev_lock);
    in_dev = dev->ip_ptr;
    if (in_dev)
	atomic_inc(&in_dev->refcnt);
    read_unlock(&inetdev_lock);
    return in_dev;
}

/* Lockless, reference-free variant of in_dev_get(); caller must ensure the
 * device cannot go away.  Also compiled out via the '#if 0' above. */
extern __inline__ struct in_device *
__in_dev_get(const struct net_device *dev)
{
    return (struct in_device*)dev->ip_ptr;
}
#else
#define in_dev_get(dev) (panic("in_dev_get: not supported"), (void*)0)
#define __in_dev_get(dev) in_dev_get(dev)
#endif

#if 0
extern void in_dev_finish_destroy(struct in_device *idev);
extern __inline__ void
in_dev_put(struct in_device *idev)
{
    if (atomic_dec_and_test(&idev->refcnt))
	in_dev_finish_destroy(idev);
}
#else
// don't think we need these
#define in_dev_put(idev) panic("in_dev_put: not supported!")
#endif

#define __in_dev_put(idev)  atomic_dec(&(idev)->refcnt)
#define in_dev_hold(idev)   atomic_inc(&(idev)->refcnt)
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from linux/if.h */
/* Standard interface flags. */
#define	IFF_UP		0x1		/* interface is up		*/
#define	IFF_BROADCAST	0x2		/* broadcast address valid	*/
#define	IFF_DEBUG	0x4		/* turn on debugging		*/
#define	IFF_LOOPBACK	0x8		/* is a loopback net		*/
#define	IFF_POINTOPOINT	0x10		/* interface is has p-p link	*/
#define	IFF_NOTRAILERS	0x20		/* avoid use of trailers	*/
#define	IFF_RUNNING	0x40		/* resources allocated		*/
#define	IFF_NOARP	0x80		/* no ARP protocol		*/
#define	IFF_PROMISC	0x100		/* receive all packets		*/
#define	IFF_ALLMULTI	0x200		/* receive all multicast packets*/

#define IFF_MASTER	0x400		/* master of a load balancer 	*/
#define IFF_SLAVE	0x800		/* slave of a load balancer	*/

#define IFF_MULTICAST	0x1000		/* Supports multicast		*/

#define IFF_VOLATILE	(IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ALLMULTI)

#define IFF_PORTSEL	0x2000          /* can set media type		*/
#define IFF_AUTOMEDIA	0x4000		/* auto media select active	*/
#define IFF_DYNAMIC	0x8000		/* dialup device with changing addresses*/

/* Interface request block used by the SIOC* socket ioctls (from linux/if.h).
 * Callers access the unions through the ifr_* macros defined just below. */
struct ifreq 
{
#define IFHWADDRLEN	6
#define	IFNAMSIZ	16
    union
    {
	char	ifrn_name[IFNAMSIZ];
    } ifr_ifrn;
	
    union {
	struct	sockaddr ifru_addr;
	struct	sockaddr ifru_dstaddr;
	struct	sockaddr ifru_broadaddr;
	struct	sockaddr ifru_netmask;
	struct  sockaddr ifru_hwaddr;
	short	ifru_flags;
	int	ifru_ivalue;
	int	ifru_mtu;
	char	ifru_slave[IFNAMSIZ];	/* Just fits the size */
	char	ifru_newname[IFNAMSIZ];
	char *	ifru_data;
    } ifr_ifru;
};

#define ifr_name	ifr_ifrn.ifrn_name	/* interface name 	*/
#define ifr_hwaddr	ifr_ifru.ifru_hwaddr	/* MAC address 		*/
#define	ifr_addr	ifr_ifru.ifru_addr	/* address		*/
#define	ifr_dstaddr	ifr_ifru.ifru_dstaddr	/* other end of p-p lnk	*/
#define	ifr_broadaddr	ifr_ifru.ifru_broadaddr	/* broadcast address	*/
#define	ifr_netmask	ifr_ifru.ifru_netmask	/* interface net mask	*/
#define	ifr_flags	ifr_ifru.ifru_flags	/* flags		*/
#define	ifr_metric	ifr_ifru.ifru_ivalue	/* metric		*/
#define	ifr_mtu		ifr_ifru.ifru_mtu	/* mtu			*/
#define ifr_slave	ifr_ifru.ifru_slave	/* slave device		*/
#define	ifr_data	ifr_ifru.ifru_data	/* for use by interface	*/
#define ifr_ifindex	ifr_ifru.ifru_ivalue	/* interface index	*/
#define ifr_bandwidth	ifr_ifru.ifru_ivalue    /* link bandwidth	*/
#define ifr_qlen	ifr_ifru.ifru_ivalue	/* Queue length 	*/
#define ifr_newname	ifr_ifru.ifru_newname	/* New name		*/

/* Interface-list request used by SIOCGIFCONF (from linux/if.h); the caller
 * supplies a buffer and ifc_len is updated with the bytes actually used. */
struct ifconf 
{
    int	ifc_len;			/* size of buffer	*/
    union 
    {
	char *			ifcu_buf;
	struct	ifreq 		*ifcu_req;
    } ifc_ifcu;
};
#define	ifc_buf	ifc_ifcu.ifcu_buf		/* buffer address	*/
#define	ifc_req	ifc_ifcu.ifcu_req		/* array of structures	*/
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from linux/if_packet.h */
/* Packet types */
#define PACKET_HOST		0		/* To us		*/
#define PACKET_BROADCAST	1		/* To all		*/
#define PACKET_MULTICAST	2		/* To group		*/
#define PACKET_OTHERHOST	3		/* To someone else 	*/
#define PACKET_OUTGOING		4		/* Outgoing of any type */
/* These ones are invisible by user level */
#define PACKET_LOOPBACK		5		/* MC/BRD frame looped back */
#define PACKET_FASTROUTE	6		/* Fastrouted frame	*/
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from linux/if_ether.h */
/*
 *	IEEE 802.3 Ethernet magic constants.  The frame sizes omit the preamble
 *	and FCS/CRC (frame check sequence). 
 */
#define ETH_ALEN	6		/* Octets in one ethernet addr	 */
#define ETH_HLEN	14		/* Total octets in header.	 */
#define ETH_ZLEN	60		/* Min. octets in frame sans FCS */
#define ETH_DATA_LEN	1500		/* Max. octets in payload	 */
#define ETH_FRAME_LEN	1514		/* Max. octets in frame sans FCS */

/*
 *	These are the defined Ethernet Protocol ID's.
 */
#define ETH_P_IP	0x0800		/* Internet Protocol packet	*/

/*
 *	This is an Ethernet frame header.
 */
struct ethhdr 
{
    unsigned char	h_dest[ETH_ALEN];	/* destination eth addr	*/
    unsigned char	h_source[ETH_ALEN];	/* source ether addr	*/
    unsigned short	h_proto;		/* packet type ID field	*/
    /* total size: 6 + 6 + 2 = 14 bytes == ETH_HLEN above */
};
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from linux/in_route.h */
/* IPv4 routing cache flags */
#define RTCF_DEAD	RTNH_F_DEAD
#define RTCF_ONLINK	RTNH_F_ONLINK

#define RTCF_NOTIFY	0x00010000
#define RTCF_DIRECTDST	0x00020000
#define RTCF_REDIRECTED	0x00040000
#define RTCF_TPROXY	0x00080000

#define RTCF_FAST	0x00200000
#define RTCF_MASQ	0x00400000
#define RTCF_SNAT	0x00800000
#define RTCF_DOREDIRECT 0x01000000
#define RTCF_DIRECTSRC	0x04000000
#define RTCF_DNAT	0x08000000
#define RTCF_BROADCAST	0x10000000
#define RTCF_MULTICAST	0x20000000
#define RTCF_REJECT	0x40000000
#define RTCF_LOCAL	0x80000000

#define RTCF_NAT	(RTCF_DNAT|RTCF_SNAT)

#define RT_TOS(tos)	((tos)&IPTOS_TOS_MASK)
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from linux/capability.h */
/*
 * This isn't from linux/capability.h at all! We should never have to make
 * privilege decisions, since we're no longer in the kernel.
 */
#define capable(cap) 1
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from linux/errqueue.h */
/* Extended error report delivered on a socket's error queue
 * (see ip_icmp_error()/ip_local_error() declared earlier). */
struct sock_extended_err
{
    __u32	ee_errno;	/* errno value of the error */
    __u8	ee_origin;	/* one of SO_EE_ORIGIN_* below */
    __u8	ee_type;
    __u8	ee_code;
    __u8	ee_pad;
    __u32   ee_info;
    __u32   ee_data;
};

#define SO_EE_ORIGIN_NONE	0
#define SO_EE_ORIGIN_LOCAL	1
#define SO_EE_ORIGIN_ICMP	2
#define SO_EE_ORIGIN_ICMP6	3

#define SO_EE_OFFENDER(ee)	((struct sockaddr*)((ee)+1))

#define SKB_EXT_ERR(skb) ((struct sock_exterr_skb *) ((skb)->cb))

/* Per-skb error-queue state; overlaid on skb->cb via SKB_EXT_ERR() above. */
struct sock_exterr_skb
{
    union {
	struct inet_skb_parm	h4;
    } header;
    struct sock_extended_err	ee;	/* report handed to the user */
    u16				addr_offset;	/* offset of offending address within the packet */
    u16				port;
};
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from {linux|asm}/sockios.h */

/* Socket-level I/O control calls. */
#define FIOSETOWN 	0x8901
#define SIOCSPGRP	0x8902
#define FIOGETOWN	0x8903
#define SIOCGPGRP	0x8904
#define SIOCATMARK	0x8905
#define SIOCGSTAMP	0x8906		/* Get stamp */

/* Routing table calls. */
#define SIOCADDRT	0x890B		/* add routing table entry	*/
#define SIOCDELRT	0x890C		/* delete routing table entry	*/
#define SIOCRTMSG	0x890D		/* call to routing system	*/

/* Socket configuration controls. */
#define SIOCGIFNAME	0x8910		/* get iface name		*/
#define SIOCSIFLINK	0x8911		/* set iface channel		*/
#define SIOCGIFCONF	0x8912		/* get iface list		*/
#define SIOCGIFFLAGS	0x8913		/* get flags			*/
#define SIOCSIFFLAGS	0x8914		/* set flags			*/
#define SIOCGIFADDR	0x8915		/* get PA address		*/
#define SIOCSIFADDR	0x8916		/* set PA address		*/
#define SIOCGIFDSTADDR	0x8917		/* get remote PA address	*/
#define SIOCSIFDSTADDR	0x8918		/* set remote PA address	*/
#define SIOCGIFBRDADDR	0x8919		/* get broadcast PA address	*/
#define SIOCSIFBRDADDR	0x891a		/* set broadcast PA address	*/
#define SIOCGIFNETMASK	0x891b		/* get network PA mask		*/
#define SIOCSIFNETMASK	0x891c		/* set network PA mask		*/
#define SIOCGIFMETRIC	0x891d		/* get metric			*/
#define SIOCSIFMETRIC	0x891e		/* set metric			*/
#define SIOCGIFMEM	0x891f		/* get memory address (BSD)	*/
#define SIOCSIFMEM	0x8920		/* set memory address (BSD)	*/
#define SIOCGIFMTU	0x8921		/* get MTU size			*/
#define SIOCSIFMTU	0x8922		/* set MTU size			*/
#define SIOCSIFNAME	0x8923		/* set interface name */
#define	SIOCSIFHWADDR	0x8924		/* set hardware address 	*/
#define SIOCGIFENCAP	0x8925		/* get/set encapsulations       */
#define SIOCSIFENCAP	0x8926		
#define SIOCGIFHWADDR	0x8927		/* Get hardware address		*/
#define SIOCGIFSLAVE	0x8929		/* Driver slaving support	*/
#define SIOCSIFSLAVE	0x8930
#define SIOCADDMULTI	0x8931		/* Multicast address lists	*/
#define SIOCDELMULTI	0x8932
#define SIOCGIFINDEX	0x8933		/* name -> if_index mapping	*/
#define SIOGIFINDEX	SIOCGIFINDEX	/* misprint compatibility :-)	*/
#define SIOCSIFPFLAGS	0x8934		/* set/get extended flags set	*/
#define SIOCGIFPFLAGS	0x8935
#define SIOCDIFADDR	0x8936		/* delete PA address		*/
#define	SIOCSIFHWBROADCAST	0x8937	/* set hardware broadcast addr	*/
#define SIOCGIFCOUNT	0x8938		/* get number of devices */

#define SIOCGIFBR	0x8940		/* Bridging support		*/
#define SIOCSIFBR	0x8941		/* Set bridging options 	*/

#define SIOCGIFTXQLEN	0x8942		/* Get the tx queue length	*/
#define SIOCSIFTXQLEN	0x8943		/* Set the tx queue length 	*/


/* ARP cache control calls. */
/*  0x8950 - 0x8952  * obsolete calls, don't re-use */
#define SIOCDARP	0x8953		/* delete ARP table entry	*/
#define SIOCGARP	0x8954		/* get ARP table entry		*/
#define SIOCSARP	0x8955		/* set ARP table entry		*/

/* RARP cache control calls. */
#define SIOCDRARP	0x8960		/* delete RARP table entry	*/
#define SIOCGRARP	0x8961		/* get RARP table entry		*/
#define SIOCSRARP	0x8962		/* set RARP table entry		*/

/* Driver configuration calls */

#define SIOCGIFMAP	0x8970		/* Get device parameters	*/
#define SIOCSIFMAP	0x8971		/* Set device parameters	*/

/* DLCI configuration calls */

#define SIOCADDDLCI	0x8980		/* Create new DLCI device	*/
#define SIOCDELDLCI	0x8981		/* Delete DLCI device		*/

/* Device private ioctl calls */

/*
 *	These 16 ioctls are available to devices via the do_ioctl() device
 *	vector. Each device should include this file and redefine these names
 *	as their own. Because these are device dependent it is a good idea
 *	_NOT_ to issue them to random objects and hope.
 */
 
#define SIOCDEVPRIVATE	0x89F0	/* to 89FF */

/*
 *	These 16 ioctl calls are protocol private
 */
 
#define SIOCPROTOPRIVATE 0x89E0 /* to 89EF */
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from net/inet_common.h */

extern struct proto_ops		inet_stream_ops;
extern struct proto_ops		inet_dgram_ops;

/*
 *	INET4 prototypes used by INET6
 */

extern void			inet_remove_sock(struct sock *sk1);
extern void			inet_put_sock(unsigned short num, 
					      struct sock *sk);
extern int			inet_release(struct socket *sock);
extern int			my_debug_func(struct socket *sock);
extern int			inet_stream_connect(struct socket *sock,
						    struct sockaddr * uaddr,
						    int addr_len, int flags);
extern int			inet_dgram_connect(struct socket *sock, 
						   struct sockaddr * uaddr,
						   int addr_len, int flags);
extern int			inet_accept(struct socket *sock, 
					    struct socket **newsock, int flags);
extern int			inet_recvmsg(struct socket *sock, 
					     struct msghdr *ubuf, 
					     int size, int flags, struct scm_cookie *scm);
extern int			inet_sendmsg(struct socket *sock, 
					     struct msghdr *msg, 
					     int size, struct scm_cookie *scm);
extern int			inet_shutdown(struct socket *sock, int how);
extern unsigned int		inet_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
extern int			inet_setsockopt(struct socket *sock, int level,
						int optname, char *optval, 
						int optlen);
extern int			inet_getsockopt(struct socket *sock, int level,
						int optname, char *optval, 
						int *optlen);
extern int			inet_fcntl(struct socket *sock, 
					   unsigned int cmd, 
					   unsigned long arg);
extern int			inet_listen(struct socket *sock, int backlog);

extern void			inet_sock_release(struct sock *sk);
extern void			inet_sock_destruct(struct sock *sk);
extern atomic_t			inet_sock_nr;
/*****************************************************************************/

/*****************************************************************************/
/*** XXX -- from {linux|net}/udp.h */
/* On-wire UDP header (all fields in network byte order on the wire). */
struct udphdr {
    __u16	source;	/* source port */
    __u16	dest;	/* destination port */
    __u16	len;	/* length of header + payload */
    __u16	check;	/* checksum */
};

#define UDP_HTABLE_SIZE		128

/* udp.c: This needs to be shared by v4 and v6 because the lookup
 *        and hashing code needs to work with different AF's yet
 *        the port space is shared.
 */
extern struct sock *udp_hash[UDP_HTABLE_SIZE];
extern rwlock_t udp_hash_lock;

extern int udp_port_rover;

/* Return nonzero iff some socket in udp_hash[] already owns local port 'num'. */
static inline int udp_lport_inuse(u16 num)
{
    struct sock *cur;

    for (cur = udp_hash[num & (UDP_HTABLE_SIZE - 1)]; cur != NULL; cur = cur->next) {
	if (cur->num == num)
	    return 1;
    }
    return 0;
}

/* Note: this must match 'valbool' in sock_setsockopt */
#define UDP_CSUM_NOXMIT		1

/* Used by SunRPC/xprt layer. */
#define UDP_CSUM_NORCV		2

/* Default, as per the RFC, is to always do csums. */
#define UDP_CSUM_DEFAULT	0

extern struct proto udp_prot;

extern void	udp_err(struct sk_buff *, unsigned char *, int);
extern int	udp_connect(struct sock *sk,
			    struct sockaddr *usin, int addr_len);

extern int	udp_sendmsg(struct sock *sk, struct msghdr *msg, int len);

extern int	udp_rcv(struct sk_buff *skb, unsigned short len);
extern int	udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int	udp_disconnect(struct sock *sk, int flags);
/*****************************************************************************/

/* This is for all connections with a full identity, no wildcards.
 * New scheme, half the table is for TIME_WAIT, the other half is
 * for the rest.  I'll experiment with dynamic table growth later.
 */
struct tcp_ehash_bucket {
    rwlock_t	lock;	/* protects 'chain' */
    struct sock	*chain;	/* sockets hashed to this bucket (TIME_WAIT half + rest) */
} __attribute__((__aligned__(8)));

extern int tcp_ehash_size;
extern struct tcp_ehash_bucket *tcp_ehash;

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define TCP_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */

/* tcp_ipv4.c: These need to be shared by v4 and v6 because the lookup
 *             and hashing code needs to work with different AF's yet
 *             the port space is shared.
 */
extern struct sock *tcp_listening_hash[TCP_LHTABLE_SIZE];
extern rwlock_t tcp_lhash_lock;
extern atomic_t tcp_lhash_users;
extern wait_queue_head_t tcp_lhash_wait;

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific sk->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point, is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->reuse && (newsk->state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit, if it is set and the socket trying to bind has
 * sk->reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (ie. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)	-DaveM
 */
/* One allocated local port plus the list of sockets bound to it. */
struct tcp_bind_bucket {
    unsigned short		port;	/* the local port this bucket represents */
    unsigned short		fastreuse;	/* set while every owner passes the reuse test (see comment above) */
    struct tcp_bind_bucket	*next;	/* hash-chain forward link */
    struct sock		*owners;	/* sockets bound to this port */
    struct tcp_bind_bucket	**pprev;	/* hash-chain back link */
};

/* One slot of the bind hash table (tcp_bhash below). */
struct tcp_bind_hashbucket {
    spinlock_t		lock;	/* protects 'chain' */
    struct tcp_bind_bucket	*chain;
};

extern struct tcp_bind_hashbucket *tcp_bhash;
extern int tcp_bhash_size;

extern spinlock_t tcp_portalloc_lock;

extern kmem_cache_t *tcp_bucket_cachep;
extern struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
						 unsigned short snum);
extern void tcp_bucket_unlock(struct sock *sk);
extern int tcp_port_rover;
extern struct sock *tcp_v4_lookup_listener(u32 addr, unsigned short hnum, int dif);

/* These are AF independent. */
/* Map a local port to its bucket index in tcp_bhash
 * (tcp_bhash_size is assumed to be a power of 2). */
static __inline__ int tcp_bhashfn(__u16 lport)
{
    int mask = tcp_bhash_size - 1;

    return lport & mask;
}

/* This is a TIME_WAIT bucket.  It works around the memory consumption
 * problems of sockets in such a state on heavily loaded servers, but
 * without violating the protocol specification.
 */
/* This is a TIME_WAIT bucket.  It works around the memory consumption
 * problems of sockets in such a state on heavily loaded servers, but
 * without violating the protocol specification.
 */
struct tcp_tw_bucket {
    /* These _must_ match the beginning of struct sock precisely.
     * XXX Yes I know this is gross, but I'd have to edit every single
     * XXX networking file if I created a "struct sock_header". -DaveM
     */
    __u32			daddr;
    __u32			rcv_saddr;
    __u16			dport;
    unsigned short		num;
    int			bound_dev_if;
    struct sock		*next;
    struct sock		**pprev;
    struct sock		*bind_next;
    struct sock		**bind_pprev;
    unsigned char		state,
	zapped;
    __u16			sport;
    unsigned short		family;
    unsigned char		reuse,
	nonagle;
    atomic_t		refcnt;	/* dropped via tcp_tw_put() below */

    /* And these are ours. */
    int			hashent;	/* ehash slot this bucket lives in */
    __u32			rcv_nxt;	/* sequence state frozen at TIME_WAIT entry */
    __u32			snd_nxt;
    __u32			ts_recent;	/* last timestamp seen (presumably for PAWS -- confirm) */
    long			ts_recent_stamp;
    struct tcp_bind_bucket	*tb;	/* bind bucket still holding our local port */
    struct tcp_tw_bucket	*next_death;	/* TIME_WAIT reaper ("death row") links */
    struct tcp_tw_bucket	**pprev_death;
    int			death_slot;	/* TCP_TWKILL_SLOTS slot we are queued in */
#ifdef CONFIG_TCP_TW_RECYCLE
    unsigned long		ttd;
    int			rto;
#endif
};

extern kmem_cache_t *tcp_timewait_cachep;

/* Drop a reference on a TIME_WAIT bucket, freeing it back to the slab
 * cache when the last reference goes away. */
extern __inline__ void tcp_tw_put(struct tcp_tw_bucket *tw)
{
    if (!atomic_dec_and_test(&tw->refcnt))
	return;
#ifdef INET_REFCNT_DEBUG
    printk(KERN_DEBUG "tw_bucket %p released\n", tw);
#endif
    kmem_cache_free(tcp_timewait_cachep, tw);
}

extern int tcp_tw_death_row_slot;
extern void tcp_timewait_kill(struct tcp_tw_bucket *tw);
extern void tcp_tw_schedule(struct tcp_tw_bucket *tw);
extern void tcp_tw_reschedule(struct tcp_tw_bucket *tw);
extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);


/* Socket demux engine toys. */
#ifdef __BIG_ENDIAN
#define TCP_COMBINED_PORTS(__sport, __dport) \
	(((__u32)(__sport)<<16) | (__u32)(__dport))
#else /* __LITTLE_ENDIAN */
#define TCP_COMBINED_PORTS(__sport, __dport) \
	(((__u32)(__dport)<<16) | (__u32)(__sport))
#endif

#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
	__u64 __name = (((__u64)(__saddr))<<32)|((__u64)(__daddr));
#else /* __LITTLE_ENDIAN */
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
	__u64 __name = (((__u64)(__daddr))<<32)|((__u64)(__saddr));
#endif /* __BIG_ENDIAN */
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
	(((*((__u64 *)&((__sk)->daddr)))== (__cookie))	&&		\
	 ((*((__u32 *)&((__sk)->dport)))== (__ports))   &&		\
	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
	(((__sk)->daddr			== (__saddr))	&&		\
	 ((__sk)->rcv_saddr		== (__daddr))	&&		\
	 ((*((__u32 *)&((__sk)->dport)))== (__ports))   &&		\
	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
#endif /* 64-bit arch */

#define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif)			   \
	(((*((__u32 *)&((__sk)->dport)))== (__ports))   			&& \
	 ((__sk)->family		== AF_INET6)				&& \
	 !ipv6_addr_cmp(&(__sk)->net_pinfo.af_inet6.daddr, (__saddr))		&& \
	 !ipv6_addr_cmp(&(__sk)->net_pinfo.af_inet6.rcv_saddr, (__daddr))	&& \
	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))

/* These can have wildcards, don't try too hard. */
/* Map a local port number to a slot of tcp_listening_hash[]. */
static __inline__ int tcp_lhashfn(unsigned short num)
{
    /* TCP_LHTABLE_SIZE is a power of 2, so '%' matches the original mask. */
    return num % TCP_LHTABLE_SIZE;
}

/* Hash a listening socket by its local port number. */
static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
{
    unsigned short lport = sk->num;

    return tcp_lhashfn(lport);
}

/* Note, that it is > than ipv6 header */
#define NETHDR_SIZE	(sizeof(struct iphdr) + 40)

/*
 * 40 is maximal IP options size
 * 20 is the maximum TCP options size we can currently construct on a SYN.
 * 40 is the maximum possible TCP options size.
 */

#define MAX_SYN_SIZE	(NETHDR_SIZE + sizeof(struct tcphdr) + 20 + MAX_HEADER + 15)
#define MAX_FIN_SIZE	(NETHDR_SIZE + sizeof(struct tcphdr) + MAX_HEADER + 15)
#define BASE_ACK_SIZE	(NETHDR_SIZE + MAX_HEADER + 15)
#define MAX_ACK_SIZE	(NETHDR_SIZE + sizeof(struct tcphdr) + MAX_HEADER + 15)
#define MAX_RESET_SIZE	(NETHDR_SIZE + sizeof(struct tcphdr) + MAX_HEADER + 15)
#define MAX_TCPHEADER_SIZE (NETHDR_SIZE + sizeof(struct tcphdr) + 20 + MAX_HEADER + 15)

/* 
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths! 
 */
#define MAX_WINDOW	32767	
#define MAX_DELAY_ACK	2

/* 
 * How much of the receive buffer do we advertise
 * (the rest is reserved for headers and driver packet overhead)
 * Use a power of 2.
 */
#define WINDOW_ADVERTISE_DIVISOR 1 // !!! was 2

/* urg_data states */
#define URG_VALID	0x0100
#define URG_NOTYET	0x0200
#define URG_READ	0x0400

#define TCP_RETR1	7	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 */

#define TCP_TIMEOUT_LEN	(15*60*HZ) /* should be about 15 mins		*/
#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to successfully 
				  * close the socket, about 60 seconds	*/
#define TCP_FIN_TIMEOUT (3*60*HZ) /* BSD style FIN_WAIT2 deadlock breaker */

#define TCP_ACK_TIME	(3*HZ)	/* time to delay before sending an ACK	*/
#define TCP_WRITE_TIME	(30*HZ)	/* initial time to wait for an ACK,
			         * after last transmit			*/
#define TCP_TIMEOUT_INIT (3*HZ)	/* RFC 1122 initial timeout value	*/
#define TCP_SYN_RETRIES	 10	/* number of times to retry opening a
				 * connection 	(TCP_RETR2-....)	*/
#define TCP_PROBEWAIT_LEN (1*HZ)/* time to wait between probes when
				 * I've got something to write and
				 * there is no window			*/
#define TCP_KEEPALIVE_TIME (120*60*HZ)		/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNACK_PERIOD	(HZ/2) /* How often to run the synack slow timer */
#define TCP_QUICK_TRIES		8  /* How often we try to retransmit, until
				    * we tell the link layer that it is something
				    * wrong (e.g. that it can expire redirects) */

/* TIME_WAIT reaping mechanism. */
#define TCP_TWKILL_SLOTS	8	/* Please keep this a power of 2. */
#define TCP_TWKILL_PERIOD	((HZ*60)/TCP_TWKILL_SLOTS)

/*
 *	TCP option
 */
 
#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM        4       /* SACK Permitted */
#define TCPOPT_SACK             5       /* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */

#define TCPOPT_MULTIPATH	30
#define TCPOPT_NEW_SUBFLOW	31
#define TCPOPT_DATA_SEQ		32
#define TCPOPT_DATA_FIN		33
#define TCPOPT_GET_OVER_IT 	34

/*
 *     TCP option lengths
 */

#define TCPOLEN_MSS            	4
#define TCPOLEN_WINDOW         	3
#define TCPOLEN_SACK_PERM      	2
#define TCPOLEN_TIMESTAMP      	10
#define TCPOLEN_DATA_SEQ 4


//plus 4 bytes for each source IP used

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8

#define TCPOLEN_MULTIPATH_ALIGNED 		8	
#define TCPOLEN_DATA_SEQ_ALIGNED		8
#define TCPOLEN_DATA_FIN_ALIGNED		4
#define TCPOLEN_GET_OVER_IT_ALIGNED		12


#define TIME_WRITE	1	/* Not yet used */
#define TIME_RETRANS	2	/* Retransmit timer */
#define TIME_DACK	3	/* Delayed ack timer */
#define TIME_PROBE0	4
#define TIME_KEEPOPEN	5

/* sysctl variables for tcp */
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;

struct open_request;

/* Address-family-specific operations used to service an open_request. */
struct or_calltable {
    int  family;	/* AF_INET / AF_INET6 */
    void (*rtx_syn_ack)	(struct sock *sk, struct open_request *req);	/* (re)send SYN-ACK */
    void (*send_ack)	(struct sk_buff *skb, struct open_request *req);
    void (*destructor)	(struct open_request *req);
    void (*send_reset)	(struct sk_buff *skb);
};

/* IPv4-specific half of a connection request (lives in open_request.af). */
struct tcp_v4_open_req {
    __u32			loc_addr;	/* local address */
    __u32			rmt_addr;	/* remote address */
    struct ip_options	*opt;	/* IP options carried on the SYN, if any */
};

/* this structure is too big */
struct open_request {
    struct open_request	*dl_next; /* Must be first member! */
    __u32			rcv_isn;
    __u32			snt_isn;
    __u16			rmt_port;
    __u16			mss;
    __u8			retrans;
    __u8			__pad;
    unsigned snd_wscale : 4, 
	rcv_wscale : 4, 
		     tstamp_ok : 1,
				 sack_ok : 1,
					   wscale_ok : 1;
    /* The following two fields can be easily recomputed I think -AK */
    __u32			window_clamp;	/* window clamp at creation time */
    __u32			rcv_wnd;	/* rcv_wnd offered first time */
    __u32			ts_recent;
    unsigned long		expires;
    struct or_calltable	*req_class;
    struct sock		*sk;
    union {
	struct tcp_v4_open_req v4_req;
    } af;

  struct multipath_options* mopt;
};

/* SLAB cache for open requests. */
extern kmem_cache_t *tcp_openreq_cachep;

#define tcp_openreq_alloc()	kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
#define tcp_openreq_free(req)	kmem_cache_free(tcp_openreq_cachep, req)

#define TCP_INET_FAMILY(fam) 1

/*
 *	Pointers to address related TCP functions
 *	(i.e. things that depend on the address family)
 *
 * 	BUGGG_FUTURE: all the idea behind this struct is wrong.
 *	It mixes socket frontend with transport function.
 *	With port sharing between IPv6/v4 it gives the only advantage,
 *	only poor IPv6 needs to permanently recheck, that it
 *	is still IPv6 8)8) It must be cleaned up as soon as possible.
 *						--ANK (980802)
 */

/* Address-family-dependent operations attached to each TCP socket
 * (see the ANK comment above on why this design is considered wrong). */
struct tcp_func {
    /* Hand a fully built segment to the network layer for transmission. */
    int			(*queue_xmit)		(struct sk_buff *skb);

    /* Fill in the TCP checksum for an outgoing segment. */
    void			(*send_check)		(struct sock *sk,
							 struct tcphdr *th,
							 int len,
							 struct sk_buff *skb);

    /* Re-derive the layer-3 header/route after a routing change. */
    int			(*rebuild_header)	(struct sock *sk);

    /* Handle an incoming SYN on a listening socket. */
    int			(*conn_request)		(struct sock *sk,
						 struct sk_buff *skb);

    /* Create the child socket once the 3-way handshake completes. */
    struct sock *		(*syn_recv_sock)	(struct sock *sk,
							 struct sk_buff *skb,
							 struct open_request *req,
							 struct dst_entry *dst);
	
    /* Insert a connecting socket into the established hash. */
    int			(*hash_connecting)	(struct sock *sk);

    /* Size of the network-layer header (e.g. IPv4 vs IPv6). */
    __u16			net_header_len;



    /* Family-specific socket option handling. */
    int			(*setsockopt)		(struct sock *sk, 
						 int level, 
						 int optname, 
						 char *optval, 
						 int optlen);

    int			(*getsockopt)		(struct sock *sk, 
						 int level, 
						 int optname, 
						 char *optval, 
						 int *optlen);


    /* Fill a sockaddr with the peer's address (getpeername support). */
    void			(*addr2sockaddr)	(struct sock *sk,
							 struct sockaddr *);

    int sockaddr_len;   /* size of the sockaddr this family uses */
};

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

/* Is seq1 strictly before seq2 in 32-bit sequence space?
 * Wraparound-safe: unsigned subtraction wraps, and the sign of the
 * difference gives the ordering as long as the seqs are < 2^31 apart. */
extern __inline int before(__u32 seq1, __u32 seq2)
{
    __s32 delta = (__s32)(seq1 - seq2);

    return delta < 0;
}

/* Is seq1 strictly after seq2, i.e. seq2 < seq1 (wraparound-safe)? */
/* Is seq1 strictly after seq2 in 32-bit sequence space?
 * Equivalent to before(seq2, seq1). */
extern __inline int after(__u32 seq1, __u32 seq2)
{
    __s32 delta = (__s32)(seq2 - seq1);

    return delta < 0;
}


/* is s2<=s1<=s3 ? */
/* Is seq2 <= seq1 <= seq3 in sequence space?
 * Both differences wrap identically, so the comparison is wrap-safe. */
extern __inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
    return (seq1 - seq2) <= (seq3 - seq2);
}

extern struct proto tcp_prot;

extern void			tcp_put_port(struct sock *sk);
extern void			__tcp_put_port(struct sock *sk);
extern void			tcp_inherit_port(struct sock *sk, struct sock *child);

extern void			tcp_v4_err(struct sk_buff *skb,
					   unsigned char *, int);

extern void			tcp_shutdown (struct sock *sk, int how);

extern int			tcp_v4_rcv(struct sk_buff *skb,
					   unsigned short len);

extern int			tcp_do_sendmsg(struct sock *sk, struct msghdr *msg);

extern int			tcp_ioctl(struct sock *sk, 
					  int cmd, 
					  unsigned long arg);

extern int			tcp_rcv_state_process(struct sock *sk, 
						      struct sk_buff *skb,
						      struct tcphdr *th,
						      unsigned len);

extern int			tcp_rcv_established(struct sock *sk, 
						    struct sk_buff *skb,
						    struct tcphdr *th, 
						    unsigned len);

/* Verdicts from tcp_timewait_state_process() telling the caller how to
 * react to a segment that hit a TIME_WAIT bucket. */
enum tcp_tw_status
{
    TCP_TW_SUCCESS = 0,  /* segment consumed, nothing more to do */
    TCP_TW_RST = 1,      /* caller should send a RST */
    TCP_TW_ACK = 2,      /* caller should re-ACK */
    TCP_TW_SYN = 3       /* acceptable new SYN: reuse the connection */
};

extern enum tcp_tw_status	tcp_timewait_state_process(struct tcp_tw_bucket *tw,
							   struct sk_buff *skb,
							   struct tcphdr *th,
							   unsigned len);

extern struct sock *		tcp_check_req(struct sock *sk,struct sk_buff *skb,
					      struct open_request *req,
					      struct open_request *prev);

extern void			tcp_close(struct sock *sk, 
					  long timeout);
extern struct sock *		tcp_accept(struct sock *sk, int flags, int *err);
extern unsigned int		tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
extern void			tcp_write_space(struct sock *sk); 

extern int			tcp_getsockopt(struct sock *sk, int level, 
					       int optname, char *optval, 
					       int *optlen);
extern int			tcp_setsockopt(struct sock *sk, int level, 
					       int optname, char *optval, 
					       int optlen);
extern void			tcp_set_keepalive(struct sock *sk, int val);
extern int			tcp_recvmsg(struct sock *sk, 
					    struct msghdr *msg,
					    int len, int nonblock, 
					    int flags, int *addr_len);

extern void			tcp_parse_options(struct sock *sk, struct tcphdr *th,
						  struct tcp_opt *tp, struct multipath_options* mopt, int no_fancy);

/*
 *	TCP v4 functions exported for the inet6 API
 */

extern int		       	tcp_v4_rebuild_header(struct sock *sk);

extern int		       	tcp_v4_build_header(struct sock *sk, 
						    struct sk_buff *skb);

extern void		       	tcp_v4_send_check(struct sock *sk, 
						  struct tcphdr *th, int len, 
						  struct sk_buff *skb);

extern int			tcp_v4_conn_request(struct sock *sk,
						    struct sk_buff *skb);

extern struct sock *		tcp_create_openreq_child(struct sock *sk,
							 struct open_request *req,
							 struct sk_buff *skb);

extern struct sock *		tcp_v4_syn_recv_sock(struct sock *sk,
						     struct sk_buff *skb,
						     struct open_request *req,
						     struct dst_entry *dst);

extern int			tcp_v4_do_rcv(struct sock *sk,
					      struct sk_buff *skb);

extern int			tcp_v4_connect(struct sock *sk,
					       struct sockaddr *uaddr,
					       int addr_len);

extern int			tcp_connect(struct sock *sk,
					    struct sk_buff *skb);

extern struct sk_buff *		tcp_make_synack(struct sock *sk,
						struct dst_entry *dst,
						struct open_request *req);

extern int			tcp_disconnect(struct sock *sk, int flags);

extern void			tcp_unhash(struct sock *sk);

extern int			tcp_v4_hash_connecting(struct sock *sk);


/* From syncookies.c */
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, 
				    struct ip_options *opt);
#define cookie_v4_init_sequence(a,b,c) 0

/* tcp_output.c */

extern void tcp_read_wakeup(struct sock *);
extern void tcp_write_xmit(struct sock *);

#ifdef DEBUG_WRITE_XMIT
extern void tcp_write_xmit1(struct sock *);
extern void tcp_write_xmit2(struct sock *);
#endif

extern void tcp_time_wait(struct sock *);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_fack_retransmit(struct sock *);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern void tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, int priority);
extern int  tcp_send_synack(struct sock *);
extern void tcp_transmit_skb(struct sock *, struct sk_buff *, int);
extern void tcp_send_skb(struct sock *, struct sk_buff *, int force_queue);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk, int max_timeout);

/* tcp_timer.c */
extern void tcp_reset_xmit_timer(struct sock *, int, unsigned long);
extern void tcp_init_xmit_timers(struct sock *);
extern void tcp_clear_xmit_timers(struct sock *);

extern void tcp_retransmit_timer(unsigned long);
extern void tcp_delack_timer(unsigned long);
extern void tcp_probe_timer(unsigned long);

extern void tcp_delete_keepalive_timer (struct sock *);
extern void tcp_reset_keepalive_timer (struct sock *, unsigned long);
extern void tcp_keepalive_timer (unsigned long);

/*
 *	TCP slow timer
 */
extern struct timer_list	tcp_slow_timer;

/* One entry of the shared TCP slow-timer machinery (see tcp_slt_array).
 * The handler fires every `period` jiffies while `count` users hold it. */
struct tcp_sl_timer {
    atomic_t	count;           /* number of active users; timer armed while > 0 */
    unsigned long	period;  /* firing interval in jiffies */
    unsigned long	last;    /* jiffies timestamp of the last run */
    void (*handler)	(unsigned long);  /* callback invoked on expiry */
};

#define TCP_SLT_SYNACK		0
#define TCP_SLT_TWKILL		1
#define TCP_SLT_MAX		2

extern struct tcp_sl_timer tcp_slt_array[TCP_SLT_MAX];
 
extern int tcp_sync_mss(struct sock *sk, u32 pmtu);


extern void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent, struct multipath_pcb* mpcb,int ip_list);

/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 */

extern unsigned int tcp_current_mss(struct sock *sk);


/* Initialize the RCV_MSS value.
 * RCV_MSS is our guess about the MSS used by the peer.
 * We have no direct information about the MSS.
 * It's better to underestimate RCV_MSS than to overestimate it:
 * overestimation makes us ACK less frequently than needed, while
 * underestimation is easier to detect and fix by tcp_measure_rcv_mss().
 */

/* Seed tp->rcv_mss, our guess at the peer's MSS (see comment above).
 * Prefers the route's advertised MSS, falls back to our cached MSS,
 * then clamps the result into [8, 536]. */
extern __inline__ void tcp_initialize_rcv_mss(struct sock *sk)
{
    struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
    struct dst_entry *dst = __sk_dst_get(sk);
    int guess = dst ? dst->advmss : tp->mss_cache;

    tp->rcv_mss = max(min(guess, 536), 8);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer push more data
 * than the offered window.
 */
static __inline__ u32 tcp_receive_window(struct tcp_opt *tp)
{
    s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

    if (win < 0)
	win = 0;
    return (u32) win;
}

u32 multipath_tcp_receive_window(struct mtcp_opt *tp);

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
extern u32	__tcp_select_window(struct sock *sk);
extern u32	__multipath_tcp_select_window(struct multipath_pcb *sk);

/* Chose a new window to advertise, update state in tcp_opt for the
 * socket, and return result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
extern u16 tcp_select_window(struct sock *sk);


/* See if we can advertise non-zero, and if so how much we
 * can increase our advertisement.  If it becomes more than
 * twice what we are talking about right now, return true.
 */
/* Can we advertise a non-zero window, and would the new advertisement be
 * more than twice the current one? Returns true when a window update is
 * worth sending. */
extern __inline__ int tcp_raise_window(struct sock *sk)
{
    struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
    u32 advertised = tcp_receive_window(tp);
    u32 candidate  = __tcp_select_window(sk);

    if (candidate == 0)
	return 0;
    return candidate > (advertised << 1);
}


/* TCP timestamps are only 32-bits, which causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We deliberately
 * use only the low 32 bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))


/* This is what the send packet queueing engine uses to pass
 * TCP per-packet control information to the transmission
 * code.  We also store the host-order sequence numbers in
 * here too.  This is 36 bytes on 32-bit architectures,
 * 40 bytes on 64-bit machines, if this grows please adjust
 * skbuff.h:skbuff->cb[xxx] size appropriately.
 */
/* Per-packet TCP control block, stored in skb->cb (see comment above
 * regarding its size budget). Sequence numbers are kept in host order. */
struct tcp_skb_cb {
    union {
	struct inet_skb_parm	h4;
    } header;	/* For incoming frames		*/
    __u32		seq;		/* Starting sequence number	*/
    __u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
    __u32		when;		/* used to compute rtt's	*/
    __u8		flags;		/* TCP header flags.		*/

    /* NOTE: These must match up to the flags byte in a
     *       real TCP header.
     */
#define TCPCB_FLAG_FIN		0x01
#define TCPCB_FLAG_SYN		0x02
#define TCPCB_FLAG_RST		0x04
#define TCPCB_FLAG_PSH		0x08
#define TCPCB_FLAG_ACK		0x10
#define TCPCB_FLAG_URG		0x20

    __u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/

    __u16		urg_ptr;	/* Valid w/URG flags is set.	*/
    __u32		ack_seq;	/* Sequence number ACK'd	*/

  /* Multipath extension: data-level sequence range carried by this skb
   * (presumably mirrors seq/end_seq at the connection level — confirm). */
  __u32 data_seq;
  __u32 end_data_seq;
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets acknowledged by FACK information" PLUS
 *	"Packets fast retransmitted"
 */
/* Estimate packets currently "in the network" (see comment above):
 * sent once, minus those FACK says were received, plus fast retransmits. */
static __inline__ int tcp_packets_in_flight(struct tcp_opt *tp)
{
    return tp->packets_out - tp->fackets_out + tp->retrans_out;
}

/* Recalculate snd_ssthresh, we want to set it to:
 *
 * 	one half the current congestion window, but no
 *	less than two segments
 *
 * We must take into account the current send window
 * as well, however we keep track of that using different
 * units so a conversion is necessary.  -DaveM
 *
 * RED-PEN.
 *  RFC 2581: "an easy mistake to make is to simply use cwnd,
 *             rather than FlightSize"
 * I see no references to FlightSize here. snd_wnd is not FlightSize,
 * it is also apriory characteristics.
 *
 *   FlightSize = min((snd_nxt-snd_una)/mss, packets_out) ?
 */
__u32 tcp_recalc_ssthresh(struct tcp_opt *tp,struct multipath_pcb* mpcb);


/* This checks if the data bearing packet SKB (usually tp->send_head)
 * should be put on the wire right now.
 */
int tcp_snd_test(struct sock *sk, struct sk_buff *skb);
int slim_tcp_snd_test(struct sock* sk, int max_len);



/* Push out any pending frames which were held back due to
 * TCP_CORK or attempt at coalescing tiny packets.
 * The socket must be locked by the caller.
 */
/* Push out any pending frames which were held back due to TCP_CORK or an
 * attempt at coalescing tiny packets. If nothing may be sent right now and
 * nothing is outstanding, arm the zero-window probe timer instead.
 * The socket must be locked by the caller. */
static __inline__ void tcp_push_pending_frames(struct sock *sk, struct tcp_opt *tp)
{
    if (!tp->send_head)
	return;

    if (tcp_snd_test(sk, tp->send_head)) {
#ifdef DEBUG_WRITE_XMIT
	tcp_write_xmit2(sk);
#else
	tcp_write_xmit(sk);
#endif
    } else if (tp->packets_out == 0 && !tp->pending) {
	/* We held off on this in tcp_send_skb() */
	tp->pending = TIME_PROBE0;
	tcp_reset_xmit_timer(sk, TIME_PROBE0, tp->rto);
    }
}

/* This tells the input processing path that an ACK should go out
 * right now.
 */
#define tcp_enter_quickack_mode(__tp)	((__tp)->ato |= (1<<31))
#define tcp_exit_quickack_mode(__tp)	((__tp)->ato &= ~(1<<31))
#define tcp_in_quickack_mode(__tp)	(((__tp)->ato & (1 << 31)) != 0)

/*
 * List all states of a TCP socket that can be viewed as a "connected"
 * state.  This now includes TCP_SYN_RECV, although I am not yet fully
 * convinced that this is the solution for the 'getpeername(2)'
 * problem. Thanks to Stephen A. Wood <saw@cebaf.gov>  -FvK
 */

/* True when `state` counts as "connected" (see comment above; includes
 * SYN_RECV). Tests the state's bit against a mask of connected states. */
extern __inline const int tcp_connected(const int state)
{
    const int connected_mask = TCPF_ESTABLISHED | TCPF_CLOSE_WAIT |
	TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_SYN_RECV;

    return (1 << state) & connected_mask;
}

/* Like tcp_connected() but without SYN_RECV: states where the handshake
 * has fully completed. */
extern __inline const int tcp_established(const int state)
{
    const int established_mask = TCPF_ESTABLISHED | TCPF_CLOSE_WAIT |
	TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2;

    return (1 << state) & established_mask;
}


extern void			tcp_destroy_sock(struct sock *sk);


/*
 * Calculate(/check) TCP checksum
 */
/* Compute (or check) the TCP checksum over the IPv4 pseudo-header plus the
 * segment; `base` is the partial checksum already accumulated over the
 * TCP header and payload. Thin wrapper around csum_tcpudp_magic(). */
static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
				   unsigned long saddr, unsigned long daddr, 
				   unsigned long base)
{
    return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
}

/* State tracing is compiled out by default; define STATE_TRACE to get
 * human-readable state names in tcp_set_state()'s debug output. */
#undef STATE_TRACE

#ifdef STATE_TRACE
/* Indexed by the TCP_* state constants (index 0 is unused). */
static char *statename[]={
    "Unused","Established","Syn Sent","Syn Recv",
    "Fin Wait 1","Fin Wait 2","Time Wait", "Close",
    "Close Wait","Last ACK","Listen","Closing"
};
#endif

/* Move the socket to `state`, unhashing it first when entering TCP_CLOSE
 * so that a closed socket never sits in the lookup hash tables.
 * Statistics counters from the original kernel code are stubbed out here
 * (the empty statements under the comments are deliberate). */
static __inline__ void tcp_set_state(struct sock *sk, int state)
{
    int oldstate = sk->state;

/* Replace with standard Linux code -S*/
    switch (state) {
    case TCP_ESTABLISHED:
	if (oldstate != TCP_ESTABLISHED) //no notion of stats -S
	    //TCP_INC_STATS(TcpCurrEstab);
	    ;
	break;

    case TCP_CLOSE:
	sk->prot->unhash(sk);

	/* Handled separately ?
	   if (sk->prev && !(sk->userlocks&SOCK_BINDPORT_LOCK))
	   tcp_put_port(sk);
	*/
	/* fall through */
    default:
	if (oldstate==TCP_ESTABLISHED) //again no stats -S
	    //tcp_statistics[smp_processor_id()*2+!in_softirq()].TcpCurrEstab--;
	    ;
    }


/* This is messy and undecipherable. All established sockets lose their hash
 * contents upon a call here??? -S
 if ( state == TCP_ESTABLISHED )
 {
 sk->prot->unhash(sk);
 }
*/
    /* Change state AFTER socket is unhashed to avoid closed
     * socket sitting in hash tables.
     */
    sk->state = state;

#ifdef STATE_TRACE
    SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
#endif	
}

/* Final teardown notification: mark both directions shut down, then either
 * destroy a dead (orphaned) socket or wake anyone sleeping on it. */
static __inline__ void tcp_done(struct sock *sk)
{
    sk->shutdown = SHUTDOWN_MASK;

    if (sk->dead)
	tcp_destroy_sock(sk);
    else
	sk->state_change(sk);
}

/* Write the TCP options for a regular (non-SYN) segment into the header at
 * `ptr`, 32 bits at a time: optional timestamp, optional SACK blocks, and
 * always a multipath data-sequence option. The caller must have reserved
 * space matching these options in the header length. */
static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_opt *tp, __u32 tstamp,__u32 data_seq)
{
    if (tp->tstamp_ok) {
	/* NOP,NOP padding keeps the 10-byte timestamp option 32-bit aligned. */
	*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
				  (TCPOPT_NOP << 16) |
				  (TCPOPT_TIMESTAMP << 8) |
				  TCPOLEN_TIMESTAMP);
	*ptr++ = htonl(tstamp);          /* TSval: our timestamp */
	*ptr++ = htonl(tp->ts_recent);   /* TSecr: echo of peer's timestamp */
    }
    if(tp->sack_ok && tp->num_sacks) {
	int this_sack;

	*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
				  (TCPOPT_NOP << 16) |
				  (TCPOPT_SACK << 8) |
				  (TCPOLEN_SACK_BASE +
				   (tp->num_sacks * TCPOLEN_SACK_PERBLOCK)));
	/* One (start, end) pair per pending SACK block. */
	for(this_sack = 0; this_sack < tp->num_sacks; this_sack++) {
	    *ptr++ = htonl(tp->selective_acks[this_sack].start_seq);
	    *ptr++ = htonl(tp->selective_acks[this_sack].end_seq);
	}
    }

   /* Multipath extension: always append the 6-byte data-sequence option. */
   *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
		      (TCPOPT_NOP << 16) |
		      (TCPOPT_DATA_SEQ << 8) |
		      6);

   *ptr++ = htonl(data_seq);
}

/* Construct a tcp options header for a SYN or SYN_ACK packet.
 * If this is ever changed make sure to change the definition of
 * MAX_SYN_SIZE to match the new maximum number of options that you
 * can generate.
 */
 

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
/* Determine the window scale and initial receive window to offer, given
 * `space` bytes of receive buffer (see comment above for caller contract).
 * Outputs: *rcv_wnd (initial window), *rcv_wscale (shift to advertise),
 * *window_clamp (possibly adjusted upper bound). */
extern __inline__ void tcp_select_initial_window(int space, __u32 mss,
						 __u32 *rcv_wnd,
						 __u32 *window_clamp,
						 int wscale_ok,
						 __u8 *rcv_wscale)
{
    /* If no clamp set the clamp to the max possible scaled window */
    if (*window_clamp == 0)
	(*window_clamp) = (65535<<14);
    space = min(*window_clamp,space);

    /* Quantize space offering to a multiple of mss if possible. */
    if (space > mss)
	space = (space/mss)*mss;

    /* NOTE: offering an initial window larger than 32767
     * will break some buggy TCP stacks. We try to be nice.
     * If we are not window scaling, then this truncates
     * our initial window offering to 32k. There should also
     * be a sysctl option to stop being nice.
     */
    (*rcv_wnd) = min(space, MAX_WINDOW);
    (*rcv_wscale) = 0;
    if (wscale_ok) {
	/* See RFC1323 for an explanation of the limit to 14 */
	while (space > 65535 && (*rcv_wscale) < 14) {
	    space >>= 1;
	    (*rcv_wscale)++;
	}
	/* NOTE(review): stock Linux does not do this extra unconditional
	 * increment; presumably deliberate headroom in this fork — confirm. */
	(*rcv_wscale)++;
    }
    /* Set the clamp no higher than max representable value */
    (*window_clamp) = min(65535<<(*rcv_wscale),*window_clamp);
}

/* Note: caller must be prepared to deal with negative returns */ 
/* Receive-buffer space we are willing to advertise: free bytes in the
 * receive buffer scaled down by WINDOW_ADVERTISE_DIVISOR.
 * Note: caller must be prepared to deal with negative returns. */
extern __inline__ int tcp_space(struct sock *sk)
{
    int free_bytes = sk->rcvbuf - atomic_read(&sk->rmem_alloc);

    return free_bytes / WINDOW_ADVERTISE_DIVISOR;
}

int multipath_tcp_space(struct multipath_pcb *mpcb);
int multipath_tcp_full_space( struct multipath_pcb* mpcb);

/* Total advertisable window if the receive buffer were empty
 * (tcp_space() without subtracting currently allocated rmem). */
extern __inline__ int tcp_full_space( struct sock *sk)
{
    return sk->rcvbuf / WINDOW_ADVERTISE_DIVISOR; 
}

/* Remove `req` from the listen socket's SYN queue; `prev` is the element
 * just before it. If `req` was the tail, point syn_wait_last back at
 * `prev` — the cast is valid only because dl_next is open_request's
 * first member (see the "Must be first member!" note on the struct). */
extern __inline__ void tcp_synq_unlink(struct tcp_opt *tp, struct open_request *req, struct open_request *prev)
{
    if(!req->dl_next)
	tp->syn_wait_last = (struct open_request **)prev;
    prev->dl_next = req->dl_next;
}

/* Append `req` to the tail of the SYN queue: terminate it, link it in at
 * the current tail pointer, then advance the tail to its dl_next slot. */
extern __inline__ void tcp_synq_queue(struct tcp_opt *tp, struct open_request *req)
{ 
    req->dl_next = NULL;
    *tp->syn_wait_last = req; 
    tp->syn_wait_last = &req->dl_next;
}

/* Initialize an empty SYN queue: no head, and the tail pointer aimed at
 * the head slot so the first tcp_synq_queue() links correctly. */
extern __inline__ void tcp_synq_init(struct tcp_opt *tp)
{
    tp->syn_wait_queue = NULL;
    tp->syn_wait_last = &tp->syn_wait_queue;
}

extern void __tcp_inc_slow_timer(struct tcp_sl_timer *slt);
extern __inline__ void tcp_inc_slow_timer(int timer)
{
    struct tcp_sl_timer *slt = &tcp_slt_array[timer];
	
    if (atomic_read(&slt->count) == 0)
    {
	__tcp_inc_slow_timer(slt);
    }

    atomic_inc(&slt->count);
}

/* Drop one user of slow timer `timer`; counterpart of tcp_inc_slow_timer(). */
extern __inline__ void tcp_dec_slow_timer(int timer)
{
    atomic_dec(&tcp_slt_array[timer].count);
}

extern const char timer_bug_msg[];

/* Cancel one of the per-socket transmit timers (retransmit, delayed-ACK,
 * or zero-window probe). Dropping a pending timer releases the socket
 * reference it held. Unknown timer ids are reported, not fatal. */
static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
{
    struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
    struct timer_list *timer;

    if (what == TIME_RETRANS) {
	timer = &tp->retransmit_timer;
    } else if (what == TIME_DACK) {
	timer = &tp->delack_timer;
    } else if (what == TIME_PROBE0) {
	timer = &tp->probe_timer;
    } else {
	printk(timer_bug_msg);
	return;
    }

    spin_lock_bh(&sk->timer_lock);
    if (del_timer(timer))
	__sock_put(sk);  /* timer no longer pins the socket */
    spin_unlock_bh(&sk->timer_lock);
}

/* This function does not return a reliable answer; treat the result only
 * as advice. */

/* Advisory check whether the given per-socket transmit timer is pending
 * (timer index != -1). Unknown ids are reported and treated as not set. */
static inline int tcp_timer_is_set(struct sock *sk, int what)
{
    struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
    int pending = 0;

    if (what == TIME_RETRANS) {
	pending = tp->retransmit_timer.index != -1;
    } else if (what == TIME_DACK) {
	pending = tp->delack_timer.index != -1;
    } else if (what == TIME_PROBE0) {
	pending = tp->probe_timer.index != -1;
    } else {
	printk(timer_bug_msg);
    }
    return pending;
}


extern void tcp_listen_wlock(void);

/* - We may sleep inside this lock.
 * - If sleeping is not required (or called from BH),
 *   use plain read_(un)lock(&tcp_lhash_lock).
 */

/* Take a reader reference on the listening-socket hash. The read_lock is
 * held only long enough to bump tcp_lhash_users, which is what writers
 * actually wait on (see tcp_listen_wlock / tcp_listen_unlock). */
extern __inline__ void tcp_listen_lock(void)
{
    /* read_lock synchronizes to candidates to writers */
    read_lock(&tcp_lhash_lock);
    atomic_inc(&tcp_lhash_users);
    read_unlock(&tcp_lhash_lock);
}

extern void tcp_listen_unlock(void);

/* Interval between keepalive probes: the per-socket setting if one was
 * configured, otherwise the system-wide sysctl default. */
static inline int keepalive_intvl_when(struct tcp_opt *tp)
{
    return tp->keepalive_intvl ? tp->keepalive_intvl
			       : sysctl_tcp_keepalive_intvl;
}

/* Idle time before the first keepalive probe: per-socket setting if
 * configured, otherwise the system-wide sysctl default. */
static inline int keepalive_time_when(struct tcp_opt *tp)
{
    return tp->keepalive_time ? tp->keepalive_time
			      : sysctl_tcp_keepalive_time;
}

//extern struct net_device *ultcp_interface; // -P2



/* Simple on/off flag used by the instrumentation below (see tcp_i_info). */
enum is_active{
    ON,
    OFF
};

/* Instrumentation event codes reported by the stack (see tcp_i_info).
 * Grouped by whether the event needs connection state to interpret. */
enum tcp_icodes{
    /*State-less Events*/
    I_RST_RECEIVED,
    I_HIGH_RTT_VARIANCE,
    I_SYN_LOSS,
    I_SYN_RECVD,
    I_START_SYN_COOKIES,
    I_END_SYN_COOKIES,
    /*Stateful Events*/
    I_BURST_LOSS,
    I_BURST_DUPLICATES,
    I_BURST_REORDERED_PACKETS,
    /*Passive Information*/
    I_CONN_TIME_DIVISION
};


// For instrumenting the kernel
struct tcp_i_info
{
    int state; //boolean
    void (*callback) (void *); //callback
    int def;
};
  

  

#endif	/* __USER_TCP_PRIVATE_H */
