#ifndef _NSA_API_H_
#define _NSA_API_H_

#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <asm/io.h>
#include <linux/list.h>

#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
#define LINUX2620
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
#define LINUX_NEW_SKBUFF
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
#define LINUX_NEW_SLAB
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
#define LINUX_NEW_GET_DEV
#endif 
#endif

#else
#include <sys/types.h>
#endif
/**
 * message debug facility; major data structures share the same debug flag.
 *  for short prints, use NPRINTK
 *  for data dumps, use NDUMP
 *    NPRINTK and NDUMP can be controlled by a module load parameter
 *  for temporary prints, use ndebug
 *    ndebug should be compiled out in production builds
 */
#define NSA_SSN_DEBUG   1
/* Debug message classes: each subsystem owns one bit in the per-device
 * msg_enable mask tested by NPRINTK/NDUMP below. */
enum {
	NSA_MSG_DRV		= 0x0001,
	NSA_MSG_NPCP	= 0x0002,
	NSA_MSG_DEV		= 0x0004,
	NSA_MSG_SPI		= 0x0008,
	NSA_MSG_SCI 	= 0x0010,
	NSA_MSG_SSN		= 0x0020,

    /* high bits select full data dumps (used with NDUMP) */
    NSA_MSG_DEV_DUMP = 0x1000,
    NSA_MSG_SSN_DUMP = 0x2000,
};


/* NPRINTK - short conditional print: emits only when the NSA_MSG_<nlevel>
 * bit is set in *(who->msg_enable); prefixes output with the 'who' token
 * and the calling function name, at KERN_<klevel> priority. */
#define NPRINTK(who, nlevel, klevel, fmt, args...) \
	(void)((NSA_MSG_##nlevel & (*(who->msg_enable))) && \
	printk(KERN_##klevel "%s: %s: " fmt, #who, \
		__FUNCTION__ , ## args))

/* NDUMP - conditional data dump: when the NSA_MSG_<nlevel> bit is set in
 * *(who->msg_enable), print the caller's name and invoke how(skb).
 * Fixed: the original expanded to a bare if(){...}, which binds to a
 * following `else` and is not a single statement; wrap in do/while(0). */
#define NDUMP(who, nlevel, how, skb) \
    do { \
        if (NSA_MSG_##nlevel & (*((who)->msg_enable))) { \
            printk("%s: ", __FUNCTION__); \
            how(skb); \
        } \
    } while (0)

/* ndebug - temporary debug print; compiles to nothing when NSA_SSN_DEBUG
 * is not defined. */
#ifdef NSA_SSN_DEBUG
#define ndebug(fmt, args...) printk("%s: " fmt, __FUNCTION__, ##args)
#else
#define ndebug(fmt, args...)
#endif

extern int nsa_debug;
#define MAX_SCI_NUMS	4

/**
 * NSA card settings, should keep the same with SempSec's
 *  configuration, which is fixed in silicon model
 */
#define SSN_HASH_BUCKET_SIZE 		0x4
//#define SSN_HASH_BUCKET_SIZE 		0x400000
#define SSN_HASH_BUCKET_DEPTH		0x4
/* NOTE(review): SIZE_BITS (22) matches the commented-out bucket size
 * 0x400000, not the active 0x4 -- confirm which configuration is live. */
#define SSN_HASH_BUCKET_SIZE_BITS   22
#define SSN_HASH_BUCKET_SIZE_MASK 	(SSN_HASH_BUCKET_SIZE-1)
#define SSN_HASH_BUCKET_BITS        2
#define SSN_HASH_BUCKET_MASK        (SSN_HASH_BUCKET_DEPTH-1)
/* sentinel ssn index values (see nsa_add_flow/nsa_del_flow) */
#define SSN_INDEX_INVALID			0xffffffff
#define SSN_INDEX_CREATED           0xfffffffe

#define NSA_MAX_IF_NUM              4095
#define NSA_MAX_QOS_ID              4096
#define NSA_MAX_PIF_NUM             8
#define NSA_MAX_LA_NUM              8
#define NSA_MAX_ZONE_NUM            1023
#define NSA_MAX_ACL_NUM				0x10000

#define NSA_AGING_TIMER_DIVIDE      30
#define	NSA_COUNTER_SIZE		(16*1024)

#define NSA_WAIT_TIMES				100
#define NSA_MAX_REGS_PER_PACKET     256

/** 
 * Core data structures: the ssn entry and its cache-able update portion
 * 
 */

/* _ssn_update: the portion of an ssn entry that may be accessed and
 *  updated from the host at any time
 */
/* 128-bit hardware layout, also addressable as two raw u_int64_t words.
 * Word 0: qos/interface fields, modification flags and control bits
 * (drop/host/mirror/valid).  Word 1: vlan tag plus destination MAC
 * split into low 16 (dmacl) and high 32 (dmach) bits. */
struct _ssn_update{
	union{
		struct {
		u_int64_t qos_id:12, qos_prio:4, elif:12, rsv1:20,
	            mod_dp:1, mod_sp:1, mod_dip:1, mod_sip:1, mod_smac:1, mod_dmac:1,
				rsv2:6, drop:1, host:1, mirror:1, valid:1;
		u_int64_t vlan_tag:12, rsv4:4, dmacl:16, dmach:32; 
        };
		u_int64_t data[2];
	};
};

/* easy access wrapper for _ssn_update: same 128 bits viewed either as
 * the structured fields (su) or as two raw u_int64_t words (data). */
struct ssn_update{
	union{
		struct _ssn_update su;
		u_int64_t data[2];
	};
};

/* full body of a single ssn entry: 64 bytes (8 x u_int64_t), matching the
 * hardware layout.  Words 0-3: 5-tuple, aging and packet/byte counters;
 * words 4-5: the host-updatable _ssn_update; words 6-7: NAT rewrite
 * values (new addresses/ports) and ttl. */
struct ssn_cell{
	union{
		struct {
		u_int64_t dip:32, sip:32;
		u_int64_t cntenb1:1, rsv1:29, prtcl:2, dp:16, sp:16;
		u_int64_t aging:14,rsv2:2,
				  packet_count:44, rsv3:4;
		u_int64_t byte_count:54,
				  rsv4:9,cntenb2:1;
		struct _ssn_update su;
		u_int64_t new_dip:32, new_sip:32;
		u_int64_t ttl:14, rsv5:18, new_dp:16,
			      new_sp:16;
		};
		u_int64_t data[8];
	};
};

/* typical operations on an ssn index; naming rule:
 *  index: bucket position
 *  offset: relative offset within the bucket
 *  position or pos: linear address of the ssn entry in the ssn table
 */
#define ssn_pos(index, offset) (((index)<<SSN_HASH_BUCKET_BITS)|(offset))
#define ssn_index(pos)          ((pos)>>SSN_HASH_BUCKET_BITS)
#define ssn_offset(pos)         ((pos)&SSN_HASH_BUCKET_MASK)

/** 
 * NSA message headers between SempSec and host: a common 
 * 32-byte header plus a type-specific header (or content). 
 */

/* common header: 32 bytes long; applications should access all 
 * fields through the nsa_dev_* macros below. 
 */

/* 1. ether type should be 0x8050, otherwise LLC 
   2. byte order: 64-bit words, reverse order
   3. proto type position: ilif-rsv11 
 */
struct nsa_header{
    u_int64_t cache:8, rsv5:12, pkttype:4, type:8, qospri:4, rsv4:2, qodcolor:2,
        epif:3, rsv3:1, ipif:3, rsv2:1, len:11, rsv1:1, cmd:4;
    u_int64_t qosid:12, rsv12:4, zone:10, zval:1, limited:1, laid:3, laval:1,
        ilif:12, rsv11:4, elif:12, rsv10:4;
    u_int64_t acl_id:16, rsv8:8, cause_id:8, ssn_id:24, rsv7:4, 
            acl_avl:1, ssn_avl:1, rsv6:2;
   u_int64_t rsv9:32, nsa_tx:16, host_tx:16;
} ;


#define NSA_HEADER_LEN  sizeof(struct nsa_header)
/* Accessors for the common 32-byte header. Fixed: the cast expansion of
 * nsa_dev_header is now fully parenthesized so the macro is safe inside
 * larger expressions (e.g. member access or arithmetic on the result).
 * NOTE(review): the field is (skb)->m_data here, not (skb)->data as used
 * by the ssn-entry macros -- confirm which buffer layout is intended. */
#define nsa_dev_header(skb)   ((struct nsa_header*)((skb)->m_data))
#define nsa_dev_pkttype(head) (head)->pkttype
#define nsa_dev_len(head) (head)->len
#define nsa_dev_cache(head) (head)->cache
/*  normally, SempSec requires ingress use pif and egress use lif, 
 *  it is the same way as protocol stack's behavior.
 */
#define nsa_dev_i_pif(head)  (head)->ipif
#define nsa_dev_e_lif(head)  (head)->elif
#define nsa_dev_i_lif(head)  (head)->ilif
#define nsa_dev_e_pif(head)  (head)->epif
#define nsa_dev_qos_prio(head) (head)->qospri
#define nsa_dev_qos_id(head) (head)->qosid
#define nsa_dev_acl_id(head)        (head)->acl_id
#define nsa_dev_acl_valid(head)     (head)->acl_avl
#define nsa_dev_ssn_id(head)        (head)->ssn_id
#define nsa_dev_ssn_valid(head)     (head)->ssn_avl
#define nsa_dev_cause_id(head)      (head)->cause_id

/* NSA cause IDs; software only cares about these three */
#define CAUSE_FIRST_PKT			0x10
#define CAUSE_HOST				0x11
#define CAUSE_MIRROR			0x12

/* SSN entry configuration message header */
/* One map slot: a pair of 24-bit ssn positions and two 4-bit masks.
 * The masks select which parts of each 64-byte cell the message body
 * carries (see the 0xb / 0xd values used by the mod/extend_aging
 * macros below -- exact bit-to-offset mapping is hardware-defined). */
struct _ssn_head{
        u_int64_t pos_1:24,
                  rsv_1:4,
                  mask_1:4,
				  pos_0: 24,
				  rsv_0 :4,
				  mask_0:4;

};
/* message header: room for up to 4 entry pairs per message */
struct nsa_ssn_head {
    struct _ssn_head entry_map[4];
};

/* typical ssn entry operations: build add/mod/del messages in an skb */
/**
 * nsa_ssn_entry_add - append one add-pair (two full 64-byte ssn cells)
 * to the message in @skb.
 * @_index: slot 0..3 in the 4-slot entry map; slot 0 also zeroes the
 *          header and accounts for its length.
 * Fixed: removed the trailing ';' after while(0) (it produced an empty
 * extra statement, breaking if/else without braces) and parenthesized
 * the macro arguments.
 */
#define nsa_ssn_entry_add(skb, _index, _pos_0, pdata_0, _pos_1, pdata_1) \
    do{ \
        struct _ssn_head *map = ((struct nsa_ssn_head*)((skb)->data))->entry_map;   \
        u_int64_t *_data=(u_int64_t*)((u_int8_t*)map+sizeof(struct nsa_ssn_head)); \
        if(!(_index)) { memset(map, 0, sizeof(struct nsa_ssn_head));  (skb)->len += sizeof(struct nsa_ssn_head);}\
        map[(_index)].pos_0 = (_pos_0);   \
        map[(_index)].pos_1 = (_pos_1);   \
        memcpy(&_data[(_index)*16], (pdata_0), 8*8);   \
        memcpy(&_data[(_index)*16+8], (pdata_1), 8*8);   \
		(skb)->len += 2*(sizeof(struct ssn_cell));    \
    }while (0)

/**
 * nsa_ssn_entry_mod - append one modify-pair to the message in @skb.
 * Sets mask 0xb (0b1011) and copies only the two _ssn_update words
 * (data words 4..5) of each cell; the skb still advances by two full
 * cell sizes, matching the original wire format.
 * Fixed: removed the trailing ';' after while(0) and parenthesized
 * the macro arguments.
 */
#define nsa_ssn_entry_mod(skb, _index,  _pos_0, pdata_0, _pos_1, pdata_1) \
    do{ \
        struct _ssn_head *map = ((struct nsa_ssn_head*)((skb)->data))->entry_map;   \
        u_int64_t *_data=(u_int64_t*)((u_int8_t*)map+sizeof(struct nsa_ssn_head)); \
        if(!(_index)) { memset(map, 0, sizeof(struct nsa_ssn_head));  (skb)->len += sizeof(struct nsa_ssn_head);}\
        map[(_index)].pos_0 = (_pos_0);   \
        map[(_index)].pos_1 = (_pos_1);   \
        map[(_index)].mask_0 = map[(_index)].mask_1 = 0xb;   /* 0b1011 */ \
        memcpy(&_data[(_index)*16+4], (pdata_0), 8*2);   \
        memcpy(&_data[(_index)*16+8+4], (pdata_1), 8*2);   \
        (skb)->len += 2*(sizeof(struct ssn_cell));    \
    }while (0)

/**
 * nsa_ssn_entry_extend_aging - append one aging-refresh pair to @skb.
 * Sets mask 0xd (0b1101) and copies only data words 2..3 of each cell
 * (the aging/counter words).
 * Fixed: removed the trailing ';' after while(0) and parenthesized
 * the macro arguments.
 */
#define nsa_ssn_entry_extend_aging(skb, _index,  _pos_0, pdata_0, _pos_1, pdata_1) \
    do{ \
        struct _ssn_head *map = ((struct nsa_ssn_head*)((skb)->data))->entry_map;   \
        u_int64_t *_data=(u_int64_t*)((u_int8_t*)map+sizeof(struct nsa_ssn_head)); \
        if(!(_index)) { memset(map, 0, sizeof(struct nsa_ssn_head));  (skb)->len += sizeof(struct nsa_ssn_head);}\
        map[(_index)].pos_0 = (_pos_0);   \
        map[(_index)].pos_1 = (_pos_1);   \
        map[(_index)].mask_0 = map[(_index)].mask_1 = 0xd;   /* 0b1101 */ \
        memcpy(&_data[(_index)*16+2], (pdata_0), 8*2);   \
        memcpy(&_data[(_index)*16+8+2], (pdata_1), 8*2);   \
        (skb)->len += 2*(sizeof(struct ssn_cell));    \
    }while (0)


/**
 * nsa_ssn_entry_del - append one delete-pair (two zeroed 64-byte cells)
 * to the message in @skb.
 * Fixed: removed the trailing ';' after while(0) and parenthesized
 * the macro arguments.
 */
#define nsa_ssn_entry_del(skb, _index, _pos_0, _pos_1) \
    do{ \
        struct _ssn_head *map = ((struct nsa_ssn_head*)((skb)->data))->entry_map;   \
        u_int64_t *_data=(u_int64_t*)((u_int8_t*)map+sizeof(struct nsa_ssn_head)); \
        if(!(_index)) { memset(map, 0, sizeof(struct nsa_ssn_head));  (skb)->len += sizeof(struct nsa_ssn_head);}\
        map[(_index)].pos_0 = (_pos_0);   \
        map[(_index)].pos_1 = (_pos_1);   \
        memset(&_data[(_index)*16], 0, 8*8);   \
        memset(&_data[(_index)*16+8], 0, 8*8); \
        (skb)->len += 2*(sizeof(struct ssn_cell));    \
    }while (0)

/* Aging and statistics message header, sent from the NSA to the host */
/* 16-byte record: aged/counted flags, the entry's bucket index and
 * in-bucket offset, a 54-bit byte count, and a packet count split
 * across packet1 (low 8 bits) and packet2 (high 36 bits). */
struct nsa_ssn_stat_head {
     u_int64_t packet2:36, offset:2, index:22, 
		 rsv1:2, cntval:1, ageval:1;
     u_int64_t byte:54, rsv2:2, packet1:8; 
};

/*  typical ssn stat operation on an skb 
 *
 *  u_int8_t *data = (u_int8_t *)(skb->data);
 *  for(i=0; i<nsa_ssn_stat_entries(skb); i++){
 *    if (nsa_ssn_stat_aged(data))
 *       do_sth_with(nsa_ssn_stat_packets(data), \
 *                   nsa_ssn_stat_bytes(data));
 *    nsa_ssn_stat_next_entry(data);
 *  }
 */
#define nsa_ssn_stat_entries(skb) (skb->len/sizeof(struct nsa_ssn_stat_head))
#define nsa_ssn_stat_aged(data)  ((struct nsa_ssn_stat_head*)(data))->ageval
#define nsa_ssn_stat_counted(data) ((struct nsa_ssn_stat_head*)(data))->cntval
#define nsa_ssn_stat_index(data)  ((struct nsa_ssn_stat_head*)(data))->index
#define nsa_ssn_stat_offset(data)  ((struct nsa_ssn_stat_head*)(data))->offset
#define nsa_ssn_stat_next_entry(data) data+=sizeof(struct nsa_ssn_stat_head)
#define nsa_ssn_stat_bytes(data)  ((struct nsa_ssn_stat_head*)(data))->byte
/* recombine the packet count: packet1 = low 8 bits, packet2 = high bits */
#define nsa_ssn_stat_packets(data) \
			((((struct nsa_ssn_stat_head*)(data))->packet1) | (((struct nsa_ssn_stat_head*)(data))->packet2)<<8)


/* counter messages: the skb payload is an array of u_int64_t counters */
#define nsa_counter_nums(skb) (skb->len/sizeof(u_int64_t))
#define nsa_counter(data, idx)  ((u_int64_t*)data)[idx]


/** 
 * NSA logical access data structure, can be used directly 
 *  from both nsa_dev interface and user application (aka. a 
 *  user-level wrapper for nsa_dev)
 * 
 */

/* version read: hardware/firmware identification returned by the device */
struct nsa_version{
    int nsa_id;         /* which NSA device */
	int partnum;
    int version, revision;
	int year, month, day;   /* build date */
	int model;
	int PCB_Version;
	int PCB_Type;
    int build;
	int EPLD_Version;
	u_int64_t dna;      /* device unique identifier */
};

/* flash operation (erase/read/write), FLASH_OPR_SIZE bytes at a time */
struct nsa_flash{
    int nsa_id; 
#define FLASH_ACTION_READ           0x1
#define FLASH_ACTION_ERASE          0x3
#define FLASH_ACTION_WRITE          0x4
#define FLASH_ACTION_RECONFIG		0x5
#define FLASH_OPR_SIZE              32
	u_int16_t action;       /* one of FLASH_ACTION_* */
	u_int16_t length;       /* bytes to transfer, <= FLASH_OPR_SIZE */
	u_int32_t address;      /* flash address */
	u_int8_t data[FLASH_OPR_SIZE];
};

/* acl operation (read/write): one ACL rule as seen by the ioctl layer */
struct nsa_acl{
    u_int16_t nsa_id;
    u_int16_t acl_id;
#define ACL_ACTION_READ         0x1
#define ACL_ACTION_WRTIE        0x2    /* NOTE(review): misspelling of WRITE kept for ABI */
#define ACL_ACTION_FLUSH        0x3
#define ACL_ACTION_DOWNLOAD     0x4
#define ACL_ACTION_SETLOG       0x5
    int action;

#define ACL_PROTOCOL_TCP     	0x1
#define ACL_PROTOCOL_UDP      	0x2
	int protocol;
	/* 5-tuple match: addresses with masks, port ranges [start, end] */
	u_int32_t dip,dipm,sip,sipm;
	u_int16_t dps,dpe,sps,spe;
#define ACL_IF_LIF	0
#define ACL_IF_ZONE	1
	u_int16_t if_type;      /* interpret if_data as lif or zone */
	u_int16_t if_data;	

#define ACL_PROC_DENY			0x1
#define ACL_PROC_TAG			0x2
	int proc;               /* action on match */
#define ACL_MIRROR_ENABLE          0x1
#define ACL_MIRROR_DISABLE         0x0
    int mirror;
    /* pointer to host software structure */
    caddr_t host_policy;
};

/* qos operation (read/write): bandwidth limits and priority per qos id */
struct nsa_qos{
    u_int16_t nsa_id;
    u_int16_t qos_id;
#define QOS_ACTION_READ         0x1
#define QOS_ACTION_WRTIE        0x2    /* NOTE(review): misspelling of WRITE kept for ABI */
    int action;
    int min;        /* minimum bandwidth */
    int max;        /* maximum bandwidth */
    int prio;       /* one of NSA_QOS_PRIO_* below */
};
#define NSA_QOS_ID_INVALID      0
#define NSA_QOS_PRIO_PLAT       1
#define NSA_QOS_PRIO_GOLD       2
#define NSA_QOS_PRIO_SILV       3
#define NSA_QOS_PRIO_COPP       4

/* ssn entry operation (read/write) via the slow path (nsa_dev ioctl) */
struct nsa_ssn{
    int nsa_id;
#define SSN_ACTION_SET		0
#define SSN_ACTION_GET		1
#define SSN_ACTION_ADD		2
#define SSN_ACTION_DEL		3
#define SSN_ACTION_MOD		4
#define SSN_ACTION_TEST		5
#define SSN_ACTION_CLEAR 	6

#define SSN_ACTION_DEBUG	0xff
	int action;
    int start;  /* start 64-byte position */
    int len;    /* how many 64-byte */
    u_int32_t pos;
    u_int64_t data[8];  /* one full ssn cell */

};

/* register operation (read/write/write-wait) */
struct nsa_reg{
/* NOTE(review): OR'ed into reg to request write-and-wait? 0x8000000 has
 * only 28 bits -- confirm it is not a typo for 0x80000000. */
#define NSA_REG_WAIT        0x8000000

/* register addresses */
#define NSA_SSR_ANTI_ATCK   0x110020
#define NSA_SSR_PKTE_CTRL   0x0e0000
#define NSA_SSR_SPM_CTRL    0x140000
    int reg;
/* NSA_SSR_ANTI_ATCK bits: anti-attack feature enables */
#define NSA_ANTI_WINNUKE    0x00400000
#define NSA_ANTI_NO_ACK_FIN 0x00200000
#define NSA_ANTI_NL_FLG_TCP 0x00100000
#define NSA_ANTI_XMAS       0x00080000
#define NSA_ANTI_LARGE_ICMP 0x00040000
#define NSA_ANTI_LAND_ATCK  0x00020000
#define NSA_ANTI_IP_OPTION  0x00010000
#define NSA_ANTI_ICMP_VAL   0x0000ffff 

/* NSA_SSR_PKTE_CTRL bits */
#define NSA_PKTE_ATCK_LOG   0x00000010
#define NSA_PKTE_LOG        0x00000008
#define NSA_PKTE_DLV_CPU    0x00000004
#define NSA_PKTE_PKT_HOST   0x00000002
#define NSA_PKTE_PKT_BYPASS 0x00000001

/* NSA_SSR_SPM_CTRL bits */
#define NSA_SPM_INIT_DONE   0x00000020
#define NSA_SPM_AGE_ENABLE  0x00000010
#define NSA_SPM_STAT_ENABLE 0x00000008
#define NSA_SPM_ALC_ENABLE  0x00000004 
    u_int32_t   data;
};

/* DIMM access: 64-byte line at addr on bank A or B */
struct nsa_dimm{
#define NSA_DIMM_A      0x00000
#define NSA_DIMM_B      0x10000
    int type;
    int mask;       /* current actual use only 8-bit */
    u_int32_t addr;       
    u_int64_t data[8];
};

/* RLDRAM access: two 16-bit words at addr on bank A or B */
struct nsa_rldram{
#define NSA_RLDRAM_A    0x00000
#define NSA_RLDRAM_B    0x10000
    int type;
    int mask;
    u_int32_t addr;
    u_int16_t data[2];
};

/* TCAM access: 144-bit (18-byte) entries; tcam_act selects the data,
 * mask or register plane */
struct nsa_tcam{
/* tcam only support 144-bit operation in software */
    u_int32_t tcam_act;
#define NSA_TCAM_ACT_DATA           0x0U
#define NSA_TCAM_ACT_MASK           0x1U
#define NSA_TCAM_ACT_REG            0x3U

#define NSA_TCAM_SEG0   0
    int seg; /* which seg?? fixme*/
    int hit;        /* search result: matched? */
    u_int32_t   addr;
    u_int8_t data[18];
};

/* MDIO access: PHY register reg_mdio on interface ifn */
struct nsa_mdio{
    u_int16_t ifn;
    u_int16_t reg_mdio;
    u_int16_t data;
};

/* single hardware counter read */
struct nsa_counter{
    u_int32_t counter_address;
    u_int64_t data;
};


#define NSA_EEPROM_INFO_ADDR	0xa0
/* SPM table access: one 32-bit word at addr in the table named spm_id */
struct nsa_spm{
#define NSA_SPM_ID_ICMP_FLD     0x1
#define NSA_SPM_ID_UDP_FLD      0x2
#define NSA_SPM_ID_INGRESS_LIF  0x3
#define NSA_SPM_ID_INGRESS_LIFP 0x4
#define NSA_SPM_ID_ACL          0x5
#define NSA_SPM_ID_EGRESS_LIFP  0x6
#define NSA_SPM_ID_PIF_MAC_L32  0x7
#define NSA_SPM_ID_PIF_MAC_H16  0x8
#define NSA_SPM_ID0             NSA_SPM_ID_PIF_MAC_H16
#define NSA_SPM_ID_QOS_MIN      0x9
#define NSA_SPM_ID_QOS_MAX      0xa
#define NSA_SPM_ID_MIN          NSA_SPM_ID_ICMP_FLD
#define NSA_SPM_ID_MAX          NSA_SPM_ID_QOS_MAX
    int spm_id;     /* one of NSA_SPM_ID_* */
    int addr;
    int data;
};

/* device status word (bit flags) plus hardware type */
struct nsa_status{
	u_int32_t status;
#define NSA_STATUS_TEST							0x0
#define NSA_STATUS_NORMAL						0x1
#define NSA_STATUS_RCONFIG						0x2
#define NSA_SATUS_TX_KTHREAD					0x4  /* NOTE(review): misspelling of STATUS kept for ABI */
#define NSA_STATUS_FLASH_UPGRADING 				0x10
#define NSA_STATUS_RESTORE_AFTER_FLASH			0xf
	int hw_type;    /* one of NSA_HW_TYPE_* (see nsa_hw_op) */
};

/* generic hardware operation envelope: action + target type, followed by
 * the type-specific payload (one of the structs above) in data[] */
struct nsa_hw_op{
	int nsa_id;
	int action;
#define NSA_HW_ACTION_READ         	0
#define NSA_HW_ACTION_WRITE        	1
#define NSA_HW_ACTION_WRITE_WAIT   	2
#define NSA_HW_ACTION_FLUSH         3
#define NSA_HW_ACTION_SEARCH        4
	int hw_type;
#define NSA_HW_TYPE_REG			    0
#define NSA_HW_TYPE_DIMM			1
#define NSA_HW_TYPE_SPM				2
#define NSA_HW_TYPE_TCAM			3
#define NSA_HW_TYPE_RLDRAM			4
#define NSA_HW_TYPE_MDIO			5
#define NSA_HW_TYPE_COUNTER			6
#define NSA_HW_TYPE_STATUS			7
	u_int8_t data[0];   /* variable-length, type-specific payload */
};


#define NSA_SCI_NAME		"eth0"
/* device identity block: MAC, board maps, serial numbers and signature */
struct nsa_dna{
	u_int8_t mac[6];
	u_int8_t tcam;          /* TCAM_VALID / TCAM_INVALID */
#define TCAM_VALID 1
#define TCAM_INVALID 0
	u_int8_t base_board_map;
         /* low 4-bit, 0: fiber, 1:copper */
	u_int8_t addon_board_map;
#define ADDON_VALID     0x80
    u_int8_t reserve[7];
	char base_board_sn[24];
	char addon_board_sn[24];
	u_int8_t signature[32];
};

/* for passing EEPROM chunks (mirrors the kernel's struct ethtool_eeprom).
 * NOTE(review): this will clash if <linux/ethtool.h> is ever included
 * in the same translation unit -- confirm.
 * Fixed: the struct definition was missing its terminating ';'. */
struct ethtool_eeprom{
	__u32 cmd;
	__u32 magic;
	__u32 offset;
	__u32 len;
	__u8  data[0];
};


/* ioctl (sockopt) numbers for the nsa control interface.
 * NOTE(review): SET_FLASH and GET_VERSION share NSACTL_BASE_CTL, and
 * both tables skip BASE+1 -- confirm this matches the dispatch code. */
#define NSACTL_BASE_CTL		128
#define NSACTL_SO_SET_FLASH		(NSACTL_BASE_CTL)
#define NSACTL_SO_SET_ACL		(NSACTL_BASE_CTL + 2)
#define NSACTL_SO_SET_SSN		(NSACTL_BASE_CTL + 3)
#define NSACTL_SO_SET_QOS       (NSACTL_BASE_CTL + 4)
#define NSACTL_SO_SET_HW        (NSACTL_BASE_CTL + 5)
#define NSACTL_SO_SET_MAX		NSACTL_SO_SET_HW

#define NSACTL_SO_GET_VERSION	(NSACTL_BASE_CTL)
#define NSACTL_SO_GET_FLASH		(NSACTL_BASE_CTL + 2)
#define NSACTL_SO_GET_ACL		(NSACTL_BASE_CTL + 3)
#define NSACTL_SO_GET_SSN		(NSACTL_BASE_CTL + 4)
#define NSACTL_SO_GET_QOS       (NSACTL_BASE_CTL + 5)
#define NSACTL_SO_GET_HW        (NSACTL_BASE_CTL + 6)
#define NSACTL_SO_GET_MAX		NSACTL_SO_GET_HW


/**
 * NSA enabler 
 */

/* On host, only the ssn_update is stored for each ssn entry; those are
 * the fields frequently accessed by software. 
 */
struct ssn_cache_item{
	struct ssn_update cache;
	caddr_t	host_info;      /* host software's connection tracking */
    void (*host_del)(caddr_t);      /* host aging function */
    void (*host_stat)(caddr_t, u_int32_t, u_int64_t);  /* host statistics function */
    u_int32_t partner_index;    /* reverse-direction entry; top bit = aging flag */
};
/* aging flag lives in the top bit of partner_index */
#define SSN_AGING(ssn) ((ssn)->partner_index |= 0x80000000)
#define SSN_AGED(ssn) ((ssn)->partner_index & 0x80000000)
#define SSN_AGING_CLEAR(ssn) ((ssn)->partner_index &= ~0x80000000)

/* one hash bucket of cache item pointers */
struct ssn_cache{
	struct ssn_cache_item* bucket[SSN_HASH_BUCKET_DEPTH];
};

/* ssn table access by the nsa enabler -- this is the fast path */
/* add: two positions plus two full 64-byte cells */
struct nsa_ssn_add{
	u_int32_t pos[2];
    struct ssn_cell		ssn[2];
};

/* mod: two positions plus the two updatable portions only */
struct nsa_ssn_mod{
    u_int32_t pos[2];
    struct ssn_update	mod[2];
};

/* del: two positions */
struct nsa_ssn_del{
	u_int32_t pos[2];
};

/* compute hash value using the same algorithm as SempSec.
 * input should be in host byte order
 */

/* protocol codes fed to nsa_ssn_hash */
#define SSN_IPPROTO_TCP		0x1
#define SSN_IPPROTO_UDP		0x2

static inline u_int32_t nsa_ssn_hash(u_int32_t sip, u_int32_t dip, 
                        u_int16_t sport,u_int16_t dport,u_int8_t protocol)				
{
	uint32_t hash_result = 0x811C9DC5;
	uint32_t ret;

	hash_result ^= (0xFF & sip);
	hash_result += (hash_result << 1) + (hash_result << 4) + (hash_result << 7) + 
		(hash_result << 8) + (hash_result << 24);

	hash_result ^= (0xFF & (sip >> 8));
	hash_result += (hash_result << 1) + (hash_result << 4) + (hash_result << 7) + 
		(hash_result << 8) + (hash_result << 24);

	hash_result ^= (0xFF & (sip >> 16));
	hash_result += (hash_result << 1) + (hash_result << 4) + (hash_result << 7) + 
		(hash_result << 8) + (hash_result << 24);

	hash_result ^= (0xFF & (sip >> 24));
	hash_result += (hash_result << 1) + (hash_result << 4) + (hash_result << 7) + 
		(hash_result << 8) + (hash_result << 24);


	hash_result ^= (0xFF & dip);
	hash_result += (hash_result << 1) + (hash_result << 4) + (hash_result << 7) + 
		(hash_result << 8) + (hash_result << 24);

	hash_result ^= (0xFF & (dip >> 8));
	hash_result += (hash_result << 1) + (hash_result << 4) + (hash_result << 7) + 
		(hash_result << 8) + (hash_result << 24);

	hash_result ^= (0xFF & (dip >> 16));
	hash_result += (hash_result << 1) + (hash_result << 4) + (hash_result << 7) + 
		(hash_result << 8) + (hash_result << 24);

	hash_result ^= (0xFF & (dip >> 24));
	hash_result += (hash_result << 1) + (hash_result << 4) + (hash_result << 7) + 
		(hash_result << 8) + (hash_result << 24);


	hash_result ^= (0xFF & sport);
	hash_result += (hash_result << 1) + (hash_result << 4) + (hash_result << 7) + 
		(hash_result << 8) + (hash_result << 24);

	hash_result ^= (0xFF & (sport >> 8));
	hash_result += (hash_result << 1) + (hash_result << 4) + (hash_result << 7) + 
		(hash_result << 8) + (hash_result << 24);

	hash_result ^= (0xFF & dport);
	hash_result += (hash_result << 1) + (hash_result << 4) + (hash_result << 7) + 
		(hash_result << 8) + (hash_result << 24);

	hash_result ^= (0xFF & (dport >> 8));
	hash_result += (hash_result << 1) + (hash_result << 4) + (hash_result << 7) + 
		(hash_result << 8) + (hash_result << 24);


	hash_result ^= (0xFF & protocol);
	hash_result += (hash_result << 1) + (hash_result << 4) + (hash_result << 7) + 
		(hash_result << 8) + (hash_result << 24);

	ret= (hash_result & (SSN_HASH_BUCKET_SIZE_MASK)) ^ ((hash_result >> SSN_HASH_BUCKET_SIZE_BITS) & SSN_HASH_BUCKET_SIZE_MASK);

	return ret;
}

#ifdef __KERNEL__
struct nsa_dev;
/* Per-device enabler: the host-side handle tying together the device,
 * the host ssn cache, logical interfaces, statistics, and the function
 * table the nsa device driver registers at initialization. */
struct nsa_enabler{
    /* device access index */
    struct nsa_dev *nsa_dev;    /* NSA device pointer*/
    u_int32_t nsa_id;           /* NSA device index */
    int *msg_enable;            /* debug message */

    /* SSN cache */
    struct ssn_cache _nsa_ssn_cache[SSN_HASH_BUCKET_SIZE];
    struct ssn_cache_item **nsa_ssn_cache; 
//    struct kmem_cache *nsa_ssn_entry_cachep;

    /* logical interface */
    struct net_device *nsa_lif_pool[NSA_MAX_IF_NUM];
    u_int16_t vlan_map[NSA_MAX_PIF_NUM][NSA_MAX_IF_NUM];
    u_int32_t lif_resource;

    /* debug and statistics */
    int nsa_enabler_counter[32];
    u_int64_t ssn_0_full, ssn_1_full, ssn_full;
    u_int64_t ssn_entries, ssn_entries_max, ssn_entries_total;

    /* NSA device interface functions registered 
     * by nsa device initialization. If the 1st parameter is nsa_dev,
     * nsa enabler will do extra things to call nsa device's function.
     * If the 1st parameter is nsa_enabler, application can call it 
     * directly using (*enabler->...)*/
    int (*add_ssn)(struct nsa_dev*, u_int32_t *, struct ssn_cell *, 
                   void (*call_after_nok)(struct nsa_dev *, u_int32_t));
    int (*mod_ssn)(struct nsa_dev*, u_int32_t *, struct ssn_update *);
    int (*del_ssn)(struct nsa_dev*, u_int32_t *, 
                   void (*call_after_ok)(int, u_int32_t));

    int (*add_lif)(struct nsa_dev*, int lif, struct net_device *, int vid);
    int (*del_lif)(struct nsa_dev*, int lif, struct net_device *, int vid);
    int (*set_lif_limit)(struct nsa_dev *, int lif, int limit);
    int (*set_lif_zone)(struct nsa_dev *, int lif, int zone);
    int (*add_lif_la)(struct nsa_dev *, int lif, struct net_device *, int la_id);
    int (*del_lif_la)(struct nsa_dev *, int lif, struct net_device *, int la_id);

    int (*nsa_get_version)(int nsa_id, struct nsa_version *);
    int (*nsa_get_ssn)(int nsa_id, u_int32_t, int, int, u_int64_t*);
    int (*nsa_set_ssn)(int nsa_id, u_int32_t, int, int, u_int64_t*);
	int (*nsa_input_ssn)(int nsa_id, int, u_int64_t*,u_int32_t *);

    /* add/del acl in tcam, to set acl log information for an acl in tcam */
    int (*nsa_get_acl)(int nsa_id, int index, struct nsa_acl *);
    int (*nsa_set_acl)(int nsa_id, int index, struct nsa_acl *);
    int (*nsa_set_acl_property)(int nsa_id, int index, int log, int action);
    int (*nsa_dev_clear_acl)(int nsa_id);
    
    /* set/get a qos id's max and min bandwidth */
    int (*nsa_set_qos)(int nsa_id, int qos_id, u_int32_t max, u_int32_t min, int prio);
    int (*nsa_get_qos)(int nsa_id, int qos_id, u_int32_t *max, u_int32_t *min, int *prio);

    int (*nsa_set_flood)(int nsa_id, int ifn, int type, int enable, int max_per_sec);
    int (*nsa_get_flood)(int nsa_id, int ifn, int type, int *enable, int *max_per_sec);

    /* eh... directly call hw functions, but can be used in application */
    int (*nsa_access_register)(struct nsa_dev *, int action, int reg, u_int32_t* data);
    int (*nsa_access_flash)(struct nsa_dev*, u_int16_t,
                         u_int32_t, u_int16_t, u_int8_t*);
    int (*nsa_access_dimm)(struct nsa_dev *, int action, int type, u_int8_t mask ,
                           u_int32_t addr, u_int64_t* data);
    int (*nsa_access_rldram)(struct nsa_dev *, int action, int type, u_int8_t mask, 
                           u_int32_t addr, u_int16_t *data);
    int (*nsa_access_spm)(struct nsa_dev *nsa, int action, u_int8_t table_id, u_int32_t addr, u_int32_t *data);
    int (*nsa_access_tcam)(struct nsa_dev *nsa, int action, int seg, 
                u_int32_t *tcam_addr, u_int32_t tcam_act, int *hit,
                u_int8_t *data);
    int (*nsa_access_mdio)(struct nsa_dev *nsa, int action, u_int16_t ifn, u_int16_t reg, u_int16_t *data);
    int (*nsa_access_counter)(struct nsa_dev *nsa, int action, int num, u_int32_t* addr, u_int64_t *data);
	void (*nsa_access_status_bit)(struct nsa_dev *nsa, u_int32_t *status, int action);
};

/* rx cookie on sk_buff, filled by driver at ingress side  */
struct nsa_rx_info {
	int cause;          /* NSA packet cause */
	int acl_id;         /* NSA's TCAM index of acl for first packet */
	int ssn_id[2];      /* NSA's SSN postion*/
    struct nsa_enabler *enabler;    /* ingress enabler */
    /* NOTE(review): 8-4-sizeof(caddr_t) is 0 only when caddr_t is 4
     * bytes; on a 64-bit build this underflows -- confirm 32-bit only. */
    u_int8_t padding[8-4-sizeof(caddr_t)];
};

/* tx cookie on sk_buff, filled by kernel and driver, 
 *  will be committed to NSA at egress or by manual updating */
struct nsa_tx_info {
    struct nsa_enabler *enabler;    /* egress enabler */
	u_int32_t *ssn_id[2];           /* address where host stores ssn index,
                                     * which host software will not know 
                                     * until enabler allocated at egress */
	caddr_t host_info;              /* host software's connection tracking*/
    void (*host_del)(caddr_t);      /* host aging function */
    void (*host_stat)(caddr_t, u_int32_t, u_int64_t);  /* host statistics function */

#define TAG_SSN_ADD		1
#define TAG_SSN_MOD		2
#define TAG_SSN_NULL    0
    int tag_id;                     /* is this cookie ssn_add or ssn_mod */
	char data[0];                   /* add/mod data pointer (nsa_ssn_add or nsa_ssn_mod) */
};

/* enabler ingress and egress processing result codes */
#define NSA_SSN_PKT_RELEASED	1
#define NSA_SSN_PKT_OK			0
#define NSA_SSN_PKT_NOACTION	-1



/**
 *  
 * API functions provided by NSA enabler 
 *  
 */

/* Return the NSA rx cookie attached to @skb, or NULL when the skb has
 * no ss_cookie or the cookie carries no rx side. */
static inline struct nsa_rx_info * skb_nsa_rxinfo(const struct sk_buff *skb)
{
	if (!skb->ss_cookie)
		return NULL;
	if (!(skb->ss_cookie->flag & SS_COOKIE_RX))
		return NULL;
	return (struct nsa_rx_info *)(skb->ss_cookie->rx_cookie);
}
/* Return the NSA tx cookie attached to @skb, or NULL when the skb has
 * no ss_cookie or the cookie carries no tx side. */
static inline struct nsa_tx_info * skb_nsa_txinfo(const struct sk_buff *skb)
{
	if (!skb->ss_cookie)
		return NULL;
	if (!(skb->ss_cookie->flag & SS_COOKIE_TX))
		return NULL;
	return (struct nsa_tx_info *)(skb->ss_cookie->tx_cookie);
}

/* If @txinfo carries an ssn_mod payload, fill @ssn_update[0..1] with the
 * two update pointers and return the payload; otherwise return NULL. */
static inline struct nsa_ssn_mod* txinfo_seek_ssn_update(
    const struct nsa_tx_info *txinfo, struct ssn_update **ssn_update)
{
	struct nsa_ssn_mod *payload = NULL;

	if (txinfo->tag_id == TAG_SSN_MOD) {
		payload = (struct nsa_ssn_mod *)(txinfo->data);
		ssn_update[0] = &payload->mod[0];
		ssn_update[1] = &payload->mod[1];
	}
	return payload;
}

/* If @txinfo carries an ssn_add payload, fill @ssn_cell[0..1] with the
 * two cell pointers and return the payload; otherwise return NULL. */
static inline struct nsa_ssn_add* txinfo_seek_ssn_cell(
    const struct nsa_tx_info *txinfo, struct ssn_cell **ssn_cell)
{
	struct nsa_ssn_add *payload = NULL;

	if (txinfo->tag_id == TAG_SSN_ADD) {
		payload = (struct nsa_ssn_add *)(txinfo->data);
		ssn_cell[0] = &payload->ssn[0];
		ssn_cell[1] = &payload->ssn[1];
	}
	return payload;
}


/* Compile a host policy object into an nsa_acl (defined elsewhere). */
struct nsa_acl* nsa_acl_compile(caddr_t host_policy);

/** 
 * nsa_is_capable - test whether an skb is capable of NSA flow acceleration
 * 
 * @skb: skb buffer
 * 
 * @return int 1 - yes; 0 - no 
 *  
 * Checks the skb's nsa cookie: the cookie is present only for flows
 * the stack considers acceleratable (TCP and UDP flows).
 */
static inline int nsa_is_capable(const struct sk_buff *skb)
{
    return skb->ss_cookie != NULL;
}

/* Stub: policy shortcut lookup is not implemented; always returns NULL. */
static inline caddr_t nsa_get_policy_shortcut(const struct sk_buff *skb)
{
    return NULL;
}

/** 
 * nsa_add_flow - allow NSA to add a flow for this skb packet
 * 
 * @skb: packet whose tx cookie will carry the registration
 * @host_info: host software pointer, used and interpreted by 
 *           host software, e.g. conntrack
 * @host_del: host software routine to handle aging packets
 * @host_stat: host software routine to handle statistics packets
 * @ossn_storage: where to store the original ssn index for host 
 *              software control
 * @rssn_storage: where to store the reverse ssn index for host 
 *              software control
 * 
 * @return nsa_enabler*: the enabler this flow belongs to
 *  
 * We could allocate the ssn immediately here, but since quite a few
 * ssn fields may still be modified before hard xmit, we defer to a
 * single update just before hard xmit (egress) to save bandwidth
 * between the NSA and the host. Both storages are primed to
 * SSN_INDEX_INVALID. BUGs if @skb has no tx cookie.
 */
static inline struct nsa_enabler * nsa_add_flow(
    const struct sk_buff *skb, caddr_t host_info, 	
	void (*host_del)(caddr_t host_info),
	void (*host_stat)(caddr_t host_info, u_int32_t b_data, u_int64_t p_data),
    u_int32_t *ossn_storage, u_int32_t *rssn_storage)
{
    struct nsa_tx_info *txinfo = skb_nsa_txinfo(skb);

    BUG_ON(!txinfo);
    skb->ss_cookie->flag |= SS_COOKIE_CONFIRMED;

    txinfo->host_info = host_info;
    txinfo->host_del = host_del;
    txinfo->host_stat = host_stat;
    txinfo->ssn_id[0] = ossn_storage;
    txinfo->ssn_id[1] = rssn_storage;

    if (ossn_storage)
        *ossn_storage = SSN_INDEX_INVALID;
    if (rssn_storage)
        *rssn_storage = SSN_INDEX_INVALID;

    return txinfo->enabler;
}
/** 
 * nsa_del_flow - delete a flow (both directions) on an NSA
 * 
 * @enabler - NSA enabler handle
 * @pos - ssn indices: [0] original, [1] reverse direction
 * 
 * Clears the cached host_info for both cache entries, then asks the
 * device to delete the ssn pair.
 *
 * @return int: 0 on success or when the flow was never installed,
 * -1 if the device rejects the delete.
 */
static inline int nsa_del_flow(struct nsa_enabler *enabler, u_int32_t *pos)
{
    if((pos[0]==SSN_INDEX_INVALID)||(pos[1]==SSN_INDEX_INVALID)) {
        return 0;
    }
	if(! enabler->nsa_ssn_cache[pos[0]]) {
		  printk(KERN_ERR "nsa_del_flow: NULL pos[0] 0x%x\n", pos[0]);
		  return 0;
	}
    /* Fix: the original dereferenced pos[1]'s cache entry without the
     * NULL check it applied to pos[0], risking a NULL dereference. */
	if(! enabler->nsa_ssn_cache[pos[1]]) {
		  printk(KERN_ERR "nsa_del_flow: NULL pos[1] 0x%x\n", pos[1]);
		  return 0;
	}

    enabler->nsa_ssn_cache[pos[0]]->host_info = NULL;
    enabler->nsa_ssn_cache[pos[1]]->host_info = NULL;

    if( 0!=(*enabler->del_ssn)(enabler->nsa_dev, pos, NULL)){
        printk("Fail to delete ssn position {0x%x, 0x%x}\n",
               pos[0], pos[1]);
        return -1;
    }
    return 0;
}


/** 
 * nsa_get_flow_enabler - get the nsa enabler handle for a flow
 * 
 * @param skb - packet belonging to the flow
 * 
 * @return struct nsa_enabler*
 *
 * BUGs if @skb carries no tx cookie.
 */
static inline struct nsa_enabler * nsa_get_flow_enabler(const struct sk_buff *skb)
{
    struct nsa_tx_info *cookie = skb_nsa_txinfo(skb);

    BUG_ON(!cookie);
    return cookie->enabler;
}


/** 
 * nsa_set_flow_mirror - set the flow's mirror bit for a new flow
 * 
 * @skb
 * @org: value for the original ssn's mirror bit, 
 *          (1, 0) new value, <0 keep default
 * @rvs: value for the reverse ssn's mirror bit, 
 *          (1, 0) new value, <0 keep default
 *  
 * Mirror 0 means SempSec will mirror all the packets in the flow to
 * host software; 1 means SempSec takes care of packets itself.
 * Only applies when the tx cookie carries a pending ssn_add.
 *  
 * If block, host and mirror are all set, block>host>mirror.
 */
static inline void nsa_set_flow_mirror(const struct sk_buff *skb, int org, int rvs)
{
    struct nsa_tx_info *txinfo = skb_nsa_txinfo(skb);
	struct ssn_cell *add[2];

    /* Fix: skb_nsa_txinfo may return NULL and txinfo_seek_ssn_cell
     * dereferences txinfo unconditionally -- guard against it. */
    if(!txinfo)
        return;
    if(!txinfo_seek_ssn_cell(txinfo, add))
		return;

	/* ssn shouldn't be NULL, otherwise couldn't seek update*/
	if(org>=0) {
		add[0]->su.mirror=org;
	}
	if(rvs>=0){
		add[1]->su.mirror=rvs;
	}
}


/** 
 * nsa_set_flow_host - set the flow's host bit
 * 
 * @skb
 * @org: value for the original ssn's host bit, 
 *          (1, 0) new value, <0 keep default
 * @rvs: value for the reverse ssn's host bit, 
 *          (1, 0) new value, <0 keep default
 *  
 * Host 1 means SempSec will give all the packets in the flow to host
 * software to process; 0 means SempSec takes care of packets itself.
 * Only applies when the tx cookie carries a pending ssn_add.
 *  
 * If block, host and state are all set, block>host>state 
 */
static inline void nsa_set_flow_host(const struct sk_buff *skb, int org, int rvs)
{
    struct nsa_tx_info *txinfo = skb_nsa_txinfo(skb);
    struct ssn_cell *add[2];

    /* Fix: skb_nsa_txinfo may return NULL and txinfo_seek_ssn_cell
     * dereferences txinfo unconditionally -- guard against it. */
    if(!txinfo)
        return;
    if(!txinfo_seek_ssn_cell(txinfo, add))
        return;

    /* ssn shouldn't be NULL, otherwise couldn't seek update*/
    if(org>=0) {
        add[0]->su.host=org;
    }
    if(rvs>=0){
        add[1]->su.host=rvs;
    }
}

/** 
 * nsa_set_flow_block - set flow's block bit for a new flow
 * 
 * @skb
 * @org: block value for original ssn, 
 *          (1, 0) new value, <0 keep default value
 * @rvs: block value for reverse ssn, 
 *          (1, 0) new value, <0 keep default value
 *  
 * Set flow's block bit: 1 means SempSec will block all the 
 * packets in the flow, 0 means SempSec will process the 
 * packets itself. 
 *  
 * If block, host and state are all set, block>host>state 
 */

static inline void nsa_set_flow_block(const struct sk_buff *skb, int org, int rvs)
{
    struct nsa_tx_info *txinfo = skb_nsa_txinfo(skb);
    struct ssn_cell *add[2];
 
    if(!txinfo_seek_ssn_cell(txinfo, add))
        return;
    /* ssn shouldn't be NULL, otherwise couldn't seek update*/
    if(org>=0) {
        add[0]->su.drop = org;
    }
    if(rvs>=0){
        add[1]->su.drop = rvs;
    }
}

/** 
 * nsa_update_flow_mirror - update flow's mirror bit
 * 
 * @skb
 * @org: mirror value for original ssn, 
 *          (1, 0) new value, <0 keep old value
 * @rvs: mirror value for reverse ssn, 
 *          (1, 0) new value, <0 keep old value
 *  
 * Update flow's mirror bit: 0 means SempSec will mirror 
 * all the packets in flow to host software, 1 means SempSec 
 * will take care of packets itself. 
 *  
 * If block, host and mirror are all set, block>host>mirror
 */
static inline void nsa_update_flow_mirror(const struct sk_buff *skb, int org, int rvs)
{
    struct nsa_tx_info *txinfo = skb_nsa_txinfo(skb);
	struct ssn_update *mod[2];
    /* bail out silently if no ssn update entries exist for this flow */
    if(!txinfo_seek_ssn_update(txinfo, mod))
		return;

	/* ssn shouldn't be NULL, otherwise couldn't seek update*/
	if(org>=0) {
		mod[0]->su.mirror=org;
	}
	if(rvs>=0){
		mod[1]->su.mirror=rvs;
	}
}

/** 
 * nsa_update_flow_host - update flow's host bit
 * 
 * @skb
 * @org: host value for the original ssn, 
 *          (1, 0) new value, <0 keep old value
 * @rvs: host value for the reverse ssn, 
 *          (1, 0) new value, <0 keep old value
 *  
 * Host bit 1 means SempSec hands every packet of the flow to 
 * host software to process; 0 means SempSec takes care of the 
 * packets itself. 
 *  
 * If block, host and state are all set, block>host>state 
 */

static inline void nsa_update_flow_host(const struct sk_buff *skb, int org, int rvs)
{
    struct ssn_update *upd[2];

    /* nothing to do when no ssn update entries exist for the flow */
    if (!txinfo_seek_ssn_update(skb_nsa_txinfo(skb), upd))
        return;

    if (org >= 0)
        upd[0]->su.host = org;
    if (rvs >= 0)
        upd[1]->su.host = rvs;
}
/** 
 * nsa_update_flow_block - update flow's block bit
 * 
 * @skb
 * @org: block value for the original ssn, 
 *          (1, 0) new value, <0 keep old value
 * @rvs: block value for the reverse ssn, 
 *          (1, 0) new value, <0 keep old value
 *  
 * Block bit 1 means SempSec will block all the packets in the 
 * flow; 0 means SempSec processes the packets itself. 
 *  
 * If block, host and state are all set, block>host>state 
 */

static inline void nsa_update_flow_block(const struct sk_buff *skb, int org, int rvs)
{
    struct ssn_update *upd[2];

    /* nothing to do when no ssn update entries exist for the flow */
    if (!txinfo_seek_ssn_update(skb_nsa_txinfo(skb), upd))
        return;

    if (org >= 0)
        upd[0]->su.drop = org;
    if (rvs >= 0)
        upd[1]->su.drop = rvs;
}
/** 
 * nsa_refresh_flow - mark flow update flags confirmed for skb 
 * 
 * @skb 
 *  
 * Once host software decides the flow may pass and be 
 * accelerated, it calls this function to tell the enabler's 
 * egress routine. 
 *  
 * For any previously issued update to take effect on SempSec, 
 * host software must call this function afterwards. 
 */
static inline void nsa_refresh_flow(const struct sk_buff *skb)
{
    if (!skb->ss_cookie)
        return;
    skb->ss_cookie->flag |= SS_COOKIE_CONFIRMED;
}

/** 
 * nsa_sync_flow - synchronize updated information
 * 
 * @skb
 *  
 *  Synchronize updated ssn entry to SempSec immediately
 */
void nsa_sync_flow(const struct sk_buff *skb);

/** 
 * nsa_sync_flow_done - mark flow as already synced, so egress 
 * does not need to sync it again 
 * 
 * @skb
 */
static inline void nsa_sync_flow_done(const struct sk_buff *skb)
{
    struct nsa_tx_info *info = skb_nsa_txinfo(skb);

    /* TAG_SSN_NULL tells the egress path that no sync is pending */
    info->tag_id = TAG_SSN_NULL;
}

/** 
 * nsa_set_flow_aging - set aging parameters for flow
 * 
 * @skb 
 * @org: original ssn aging parameter in NSA_AGING_TIMER_DIVIDE 
 *     seconds, if <0, keep default
 * @rvs: reverse ssn aging parameter in NSA_AGING_TIMER_DIVIDE 
 *     seconds, if <0, keep default
 */
static inline void nsa_set_flow_aging(const struct sk_buff *skb, int org, int rvs)
{
    struct ssn_cell *cells[2];

    /* nothing to do when no ssn cells are attached to the flow */
    if (!txinfo_seek_ssn_cell(skb_nsa_txinfo(skb), cells))
        return;

    /* both the reload value (aging) and the live counter (ttl) are set */
    if (org >= 0) {
        cells[0]->ttl = org;
        cells[0]->aging = org;
    }
    if (rvs >= 0) {
        cells[1]->ttl = rvs;
        cells[1]->aging = rvs;
    }
}


/** 
 * nsa_set_flow_qos_id - set qos_id parameters for flow
 * 
 * @skb 
 * @org: original ssn qos_id parameter, if <0, keep default
 * @rvs: reverse ssn qos_id parameter, if <0, keep default
 */
static inline void nsa_set_flow_qos_id(const struct sk_buff *skb, int org, int rvs)
{
    struct nsa_tx_info *txinfo = skb_nsa_txinfo(skb);
    struct ssn_cell *add[2];

    /* bail out silently if no ssn cells are attached to this flow yet */
    if(!txinfo_seek_ssn_cell(txinfo, add))
        return;
    /* ssn shouldn't be NULL, otherwise couldn't seek update*/
    if(org>=0) {
        add[0]->su.qos_id=org;
    }
    if(rvs>=0){
        add[1]->su.qos_id=rvs;
    }
}

/** 
 * nsa_set_flow_qos_prio - set qos_prio parameters for flow
 * 
 * @skb 
 * @org: original ssn qos_prio parameter, if <0, keep default
 * @rvs: reverse ssn qos_prio parameter, if <0, keep default
 */
static inline void nsa_set_flow_qos_prio(const struct sk_buff *skb, int org, int rvs)
{
    struct nsa_tx_info *txinfo = skb_nsa_txinfo(skb);
    struct ssn_cell *add[2];

    /* bail out silently if no ssn cells are attached to this flow yet */
    if(!txinfo_seek_ssn_cell(txinfo, add))
        return;
    /* ssn shouldn't be NULL, otherwise couldn't seek update*/
    if(org>=0) {
        add[0]->su.qos_prio=org;
    }
    if(rvs>=0){
        add[1]->su.qos_prio=rvs;
    }
}


extern u_int8_t nsa_qosid_mark_map[MAX_SCI_NUMS][NSA_MAX_QOS_ID];


int nsa_get_flow_statitcs(int *pos);

#define NSA_LIF_PHY     0               /* the logical interface is physical interface */
#define NSA_LIF_LA      0xffff          /* the logical interface is LA interface */
 
int nsa_add_lif(struct net_device *pdev, struct net_device *ldev, int vid);
int nsa_remove_lif(struct net_device *pdev, struct net_device *ldev, int vid);
int nsa_set_lif_limit(struct net_device *, int limit);
int nsa_set_lif_zone(struct net_device *, int zone);
int nsa_add_lif_la(struct net_device *pdev, struct net_device *ldev, int la_id);
int nsa_del_lif_la(struct net_device *pdev, struct net_device *ldev, int la_id);


/* manually new/delete/mod ssn via fast path, for testing and debugging only */
int nsa_new_ssn(struct nsa_enabler *enabler, struct nsa_ssn_add *add);
int nsa_delete_ssn(struct nsa_enabler *enabler, struct nsa_ssn_del* del);
int nsa_mod_ssn(struct nsa_enabler *enabler, struct nsa_ssn_mod *mod);

void nsa_process_aging(struct nsa_enabler *enabler, struct sk_buff *skb);

char *dump_ssn_bucket(struct nsa_enabler *enabler, u_int32_t index);


extern int proc_match(int len, const char *name, struct proc_dir_entry *de);
extern struct proc_dir_entry * nsa_dev_create_proc(char *name,struct proc_dir_entry *parent,int *ret);

/* dump_skb - hex-dump the first l bytes of skb->data, 8 bytes per row,
 * prefixed by the calling function, the tag string s and the skb lengths.
 * All macro arguments are parenthesized so expression arguments expand
 * safely; the whole body is a do-while(0) so it behaves as one statement. */
#define dump_skb(s, skb, l) do {\
    int i;\
    printk("\n%s %s skb: len = %d data_len = %d\n\t", __FUNCTION__, (s), (skb)->len, (skb)->data_len);\
    for(i=0; i<(l); i++) {\
        printk("%02x ", (skb)->data[i]);\
        if((i+1)%8==0) {\
            printk("\n\t");\
        }\
    } \
    printk("\n"); \
}while(0)

#endif /* __KERNEL__ */
#endif /*_NSA_API_H_*/

