/*
 * This file is part of the Linux NIC driver for Emulex networking products.
 *
 * Copyright (C) 2005-2013 Emulex. All rights reserved.
 *
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 * linux-drivers@emulex.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE
 * EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 */

#ifndef BE_COMPAT_H
#define BE_COMPAT_H

/* RHEL kernels define RHEL_RELEASE_CODE; use its presence to flag a RHEL
 * build for the compat paths below.
 */
#ifdef RHEL_RELEASE_CODE
#define RHEL
#endif

/* Fallbacks so the RHEL version comparisons below compile on non-RHEL
 * kernels (code 0 never matches a real release).
 */
#ifndef RHEL_RELEASE_CODE
#define RHEL_RELEASE_CODE 0
#endif

#ifndef RHEL_RELEASE_VERSION
#define RHEL_RELEASE_VERSION(a,b)	(((a) << 8) + (b))
#endif

/* Older kernels use the un-suffixed VLAN feature-flag names; map the
 * newer CTAG names onto them.
 */
#ifndef NETIF_F_HW_VLAN_CTAG_DEFINED
#define NETIF_F_HW_VLAN_CTAG_TX         NETIF_F_HW_VLAN_TX
#define NETIF_F_HW_VLAN_CTAG_RX         NETIF_F_HW_VLAN_RX
#define NETIF_F_HW_VLAN_CTAG_FILTER     NETIF_F_HW_VLAN_FILTER
#endif
/****************** SLES10 backport ***************************/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#include <linux/tifm.h>
#define DMA_BIT_MASK(n) 		(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
/* Split shift avoids undefined behaviour when u32/u64 widths differ. */
#define upper_32_bits(n)		((u32)(((n) >> 16) >> 16))
#define CHECKSUM_PARTIAL		CHECKSUM_HW
#define CHECKSUM_COMPLETE		CHECKSUM_HW
/* Pre-2.6.22 skbs keep protocol header pointers in the nh union. */
#define ip_hdr(skb)			(skb->nh.iph)
#define ipv6_hdr(skb)			(skb->nh.ipv6h)
#define __packed			__attribute__ ((packed))
#define bool				u8
#define FIELD_SIZEOF(t, f)		(sizeof(((t *)0)->f))
#define IRQF_SHARED			SA_SHIRQ
#define NETIF_F_TSO6			NETIF_F_TSO

/* Only for SLES10.3 */
enum { false = 0, true = 1 };

/* Backport: set the network-header pointer via the old skb->nh field. */
static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->nh.raw = skb->data + offset;
}

/* Approximation: detects IPv6 by the IP-header version field rather than
 * by gso_type (which these kernels lack).
 */
static inline int skb_is_gso_v6(const struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 6);
}
/* In SLES 10.x, when CONFIG_PCIEAER is not defined, the macro
 * defines for the 3 APIs below coming from the kernel hdr file(aer.h) are
 * broken and cause compilation errors.
 * The stubs below simply report failure (-EINVAL) since AER is
 * unavailable without CONFIG_PCIEAER.
 */
#ifndef CONFIG_PCIEAER
#undef pci_enable_pcie_error_reporting
#undef pci_disable_pcie_error_reporting
#undef pci_cleanup_aer_uncorrect_error_status
static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
	return -EINVAL;
}
static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
	return -EINVAL;
}
static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	return -EINVAL;
}
#endif
#endif /********************** SLES10 backport ***************************/

/****************************** RHEL5 backport ***************************/
/* Backport of request_irq */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
typedef irqreturn_t(*backport_irq_handler_t) (int, void *);
/* Wrap a 2-argument (modern-style) IRQ handler so it can be registered
 * with the old 3-argument (int, void *, struct pt_regs *) request_irq().
 * The cast relies on the handler never dereferencing the pt_regs arg.
 */
static inline int request_irq_compat(uint irq,
				     irqreturn_t(*handler) (int, void *),
				     ulong flags, const char *dev_name,
				     void *dev_id)
{
	return request_irq(irq,
		(irqreturn_t(*) (int, void *, struct pt_regs *))handler,
		flags, dev_name, dev_id);
}
#define request_irq			request_irq_compat
#endif /************************ RHEL5 backport ***************************/

/*************************** NAPI backport ********************************/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)

/* No GRO on these kernels: flushing is a no-op. */
#ifndef GRO_defined
#define napi_gro_flush(napi)		do {} while(0)
#endif

/* RHEL 5.4+ has a half baked napi_struct implementation.
 * Bypass it and use simulated NAPI using multiple netdev structs
 */
#ifdef RHEL
typedef struct napi_struct		rhel_napi;
#endif

#define netif_napi_add			netif_napi_add_compat
#define	netif_napi_del			netif_napi_del_compat
/* The three macros below expand to the kernel function of the same name
 * (the preprocessor never re-expands a macro inside its own expansion);
 * only the napi argument is re-cast to the kernel's napi type.
 * Note: no trailing semicolon -- the call site supplies it; one here
 * would break use in unbraced if/else statements.
 */
#define napi_gro_frags(napi) 		napi_gro_frags((rhel_napi *)(napi))
#define napi_get_frags(napi)		napi_get_frags((rhel_napi *)(napi))
#define vlan_gro_frags(napi, g, v)	vlan_gro_frags((rhel_napi *)(napi), g, v)
#define napi_schedule(napi)		netif_rx_schedule((napi)->dev)
#define napi_enable(napi)		netif_poll_enable((napi)->dev)
#define napi_disable(napi)		netif_poll_disable((napi)->dev)
/* Wrapped in do { } while (0) so both statements stay together when the
 * macro is used as the body of an unbraced if/else.
 */
#define napi_complete(napi)		do {				       \
						napi_gro_flush((rhel_napi *)(napi)); \
						netif_rx_complete((napi)->dev); \
					} while (0)
#define napi_schedule_prep(napi)	netif_rx_schedule_prep((napi)->dev)
#define __napi_schedule(napi)		__netif_rx_schedule((napi)->dev)

#define napi_struct			napi_struct_compat

/* Minimal napi_struct for simulated NAPI.  On RHEL the kernel's own
 * napi_struct must be the first member so the (rhel_napi *) casts above
 * remain valid.
 */
struct napi_struct_compat {
#ifdef RHEL
	rhel_napi napi;	/* must be the first member */
#endif
	struct net_device *dev;
	int (*poll) (struct napi_struct *napi, int budget);
};

extern void netif_napi_del_compat(struct napi_struct *napi);
extern void netif_napi_add_compat(struct net_device *, struct napi_struct *,
				int (*poll) (struct napi_struct *, int), int);
#endif /*********************** NAPI backport *****************************/


/*********************** Backport of netdev ops struct ********************/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
/* Mirror of the 2.6.28+ net_device_ops table.  On old kernels the driver
 * fills this struct and be_netdev_ops_init() copies each callback into the
 * corresponding net_device function pointer.
 */
struct net_device_ops {
	int	(*ndo_init)(struct net_device *dev);
	void	(*ndo_uninit)(struct net_device *dev);
	int	(*ndo_open)(struct net_device *dev);
	int	(*ndo_stop)(struct net_device *dev);
	int	(*ndo_start_xmit) (struct sk_buff *skb, struct net_device *dev);
	u16	(*ndo_select_queue)(struct net_device *dev,
				    struct sk_buff *skb);
	void	(*ndo_change_rx_flags)(struct net_device *dev, int flags);
	void	(*ndo_set_rx_mode)(struct net_device *dev);
	void	(*ndo_set_multicast_list)(struct net_device *dev);
	int	(*ndo_set_mac_address)(struct net_device *dev, void *addr);
	int	(*ndo_validate_addr)(struct net_device *dev);
	int	(*ndo_do_ioctl)(struct net_device *dev,
			struct ifreq *ifr, int cmd);
	int	(*ndo_set_config)(struct net_device *dev, struct ifmap *map);
	int	(*ndo_change_mtu)(struct net_device *dev, int new_mtu);
	int	(*ndo_neigh_setup)(struct net_device *dev,
				struct neigh_parms *);
	void	(*ndo_tx_timeout) (struct net_device *dev);

	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	void	(*ndo_vlan_rx_register)(struct net_device *dev,
				struct vlan_group *grp);
	void	(*ndo_vlan_rx_add_vid)(struct net_device *dev,
				unsigned short vid);
	void	(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
				unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void	(*ndo_poll_controller)(struct net_device *dev);
#endif
};

/* eth_validate_addr does not exist on these kernels; NULL skips it. */
#define eth_validate_addr		NULL
extern void be_netdev_ops_init(struct net_device *n, struct net_device_ops *p);

#endif /******************** Backport of netdev ops struct ****************/

/*************** Backport of Delayed work queues **************************/
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 19)
/* Kernels without struct delayed_work: wrap the plain work_struct. */
#if (defined(RHEL_MINOR) && RHEL_MINOR < 6) || \
	LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 16)
struct delayed_work {
	struct work_struct work;
};
#endif

/******** initialize all of a work-struct: ***************/
/* NOTE: this helper must be defined BEFORE the #undef below so that the
 * INIT_WORK inside it expands to the kernel's original 3-argument macro
 * (old-style handlers receive the work pointer as their data argument).
 */
static inline void INIT_WORK_COMPAT(struct work_struct *work, void (*func))
{
	INIT_WORK(work, func, work);
}
#undef INIT_WORK
#define INIT_WORK INIT_WORK_COMPAT
#undef INIT_DELAYED_WORK
#define INIT_DELAYED_WORK(_work, _func)	INIT_WORK(&(_work)->work, _func)

#ifndef CANCEL_DELAYED_WORK_SYNC_defined
/* Best-effort stand-in: cancel_rearming_delayed_work() waits for a running
 * handler, like the real cancel_delayed_work_sync(); always reports 0.
 */
static inline int backport_cancel_delayed_work_sync(struct delayed_work *work)
{
	cancel_rearming_delayed_work(&work->work);
	return 0;
}
#define cancel_delayed_work_sync backport_cancel_delayed_work_sync
#endif

/* delay == 0 must run immediately; old schedule_delayed_work() did not
 * special-case that, so route it through schedule_work().
 */
static inline int backport_schedule_delayed_work(struct delayed_work *work,
		unsigned long delay)
{
	if (unlikely(!delay))
		return schedule_work(&work->work);
	else
		return schedule_delayed_work(&work->work, delay);
}
#define schedule_delayed_work backport_schedule_delayed_work
#endif /*************** Backport of Delayed work queues ******************/

/************************* Multi TXQ Support *****************************/
/* Supported only in RHEL6 and SL11.1 (barring one execption) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
#define MQ_TX
#define tx_mq_kernel				true
#else
#define tx_mq_kernel				false
#endif

/* Degrade alloc_etherdev_mqs() gracefully: drop the rx count, then the
 * tx count, depending on what the kernel provides.
 */
#ifndef ALLOC_ETHDEV_MQS_defined 
#ifdef ALLOC_ETHDEV_MQ_defined
#define alloc_etherdev_mqs(sz, tx_cnt, rx_cnt)  alloc_etherdev_mq(sz, tx_cnt)
#else
#define alloc_etherdev_mqs(sz, tx_cnt, rx_cnt)  alloc_etherdev(sz)
#endif
#endif

/* Pre-2.6.27: single-queue kernels; map all subqueue ops onto the one
 * device queue and pin queue ids/hashes to 0.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
#define alloc_etherdev_mq(sz, cnt) 		alloc_etherdev(sz)
#define skb_get_queue_mapping(skb)		0
#define skb_tx_hash(dev, skb)			0
#define netif_tx_start_all_queues(dev)		netif_start_queue(dev)
#define netif_wake_subqueue(dev, idx)		netif_wake_queue(dev)
#define netif_stop_subqueue(dev, idx)		netif_stop_queue(dev)
#define __netif_subqueue_stopped(dev, idx)	netif_queue_stopped(dev)
#endif /* < 2.6.27 */

#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && \
		        (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)))
#define skb_tx_hash(dev, skb)			0
#endif

#ifndef NETIF_SET_REAL_NUM_TX_QS_defined
/* Fallback: poke the field directly when the setter API is missing; a
 * no-op when the field itself does not exist either.
 */
static inline void netif_set_real_num_tx_queues(struct net_device *dev,
						unsigned int txq)
{
#ifdef REAL_NUM_TX_QS_defined
	dev->real_num_tx_queues = txq;
#endif
}
#endif

/********************** Multi TXQ Support **************************/

#ifndef GET_NUM_DEF_RSS_defined
/* Fallback netif_get_num_default_rss_queues(); implemented in a .c file. */
#define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
int netif_get_num_default_rss_queues(void);
#endif

#ifndef NETIF_SET_REAL_NUM_RX_QS_defined
/* No rx-queue accounting on this kernel: accept and report success. */
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
					       unsigned int rxq)
{
	return 0;
}
#endif

#ifdef NETDEV_RPS_INFO_defined
/* Handling RHEL6.4 kernel bug related to netif_set_real_num_rx_queues.
 * Calling of netif_set_real_num_rx_queues before register_netdev results in
 * updating both num_rx_queues and real_num_rx_queues with given num_rx_qs.
 * Because of this unable to increase the num_rx_qs with set_channel
 */
static inline int netif_set_real_num_rx_queues_fixed(struct net_device *dev,
						      unsigned int rxq)
{
	unsigned int num_rx_queues;
	int status;

	/* Save the max-queue count the kernel call may clobber... */
	num_rx_queues = netdev_extended(dev)->rps_data.num_rx_queues;

	status = netif_set_real_num_rx_queues(dev, rxq);

	/*Restoring num_rx_queues*/
	netdev_extended(dev)->rps_data.num_rx_queues = num_rx_queues;

	return status;
}

#define netif_set_real_num_rx_queues	netif_set_real_num_rx_queues_fixed

#endif

#ifndef SKB_RECORD_RX_QUEUE_defined
#define skb_record_rx_queue(skb, index) do {} while(0)
#endif

#ifndef ALLOC_SKB_defined
/* Backport of netdev_alloc_skb(): atomically allocate an skb of @length
 * bytes, reserve 16 bytes of headroom (NET_PAD_SKB) and attach it to @dev.
 * Returns NULL when the allocation fails.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	const unsigned int pad = 16;	/* NET_PAD_SKB */
	struct sk_buff *new_skb = alloc_skb(length + pad, GFP_ATOMIC);

	if (unlikely(new_skb == NULL))
		return NULL;

	skb_reserve(new_skb, pad);
	new_skb->dev = dev;
	return new_skb;
}
#endif

/* Assorted small fallbacks for identifiers missing on older kernels. */
#ifndef DEFINE_PCI_DEVICE_TABLE 
#define DEFINE_PCI_DEVICE_TABLE(t) 	struct pci_device_id t[] __devinitdata
#endif

#ifndef PTR_ALIGN
#define PTR_ALIGN(p, a)			((typeof(p)) ALIGN((ulong)(p), (a)))
#endif

#ifndef	ETH_FCS_LEN 
#define ETH_FCS_LEN			4
#endif

/* netdev_tx_t was introduced in 2.6.32; plain int before that. */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31)
#define netdev_tx_t			int
#endif

#ifndef VLAN_PRIO_MASK
#define VLAN_PRIO_MASK          	0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT         	13
#endif

#if defined(USE_NEW_VLAN_MODEL) || LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
/* vlan_gro_frags() can be safely called when vlan_group is NULL
 * for kernels >= 3.0 or when kernels uses USE_NEW_VLAN_MODEL.
 */
#define NULL_VLAN_GRP_SAFE
#endif

#if !defined(VLAN_GRP_SET_DEV_defined) && !defined(USE_NEW_VLAN_MODEL)
/* Backport of vlan_group_set_device(): record @dev as the device for
 * @vlan_id in the group's lookup table.  A NULL group is ignored.
 */
static inline void vlan_group_set_device(struct vlan_group *vg, u16 vlan_id,
					struct net_device *dev)
{
	if (vg)
		vg->vlan_devices[vlan_id] = dev;
}
#endif /* VLAN_GRP_SET_DEV_defined */

#ifndef ETHTOOL_FLASH_MAX_FILENAME
#define ETHTOOL_FLASH_MAX_FILENAME	128
#endif

/* On Xen without GRO, coalesce into a single fragment per frame. */
#if defined(CONFIG_XEN) && !defined(GRO_defined)
#define BE_INIT_FRAGS_PER_FRAME		(u32) 1
#else
#define BE_INIT_FRAGS_PER_FRAME		(min((u32) 16, (u32) MAX_SKB_FRAGS))
#endif

#ifndef ALLOC_SKB_IP_ALIGN_defined
/* Backport: allocate an skb with extra NET_IP_ALIGN headroom so the IP
 * header lands on an aligned boundary.
 */
static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
#ifndef netif_set_gso_max_size
#define netif_set_gso_max_size(netdev, size) do {} while (0)
#endif
#endif

/* Feature-flag fallbacks: where a kernel lacks a flag, substitute the
 * nearest equivalent, or 0 so OR-ing it in becomes a no-op.
 */
#ifndef NETIF_F_VLAN_SG
#define NETIF_F_VLAN_SG			NETIF_F_SG
#endif

#ifndef NETIF_F_VLAN_CSUM
#define NETIF_F_VLAN_CSUM		0
#endif

#ifndef NETIF_F_VLAN_TSO
#define NETIF_F_VLAN_TSO		NETIF_F_TSO
#endif

#ifndef NETIF_F_IPV6_CSUM
#define NETIF_F_IPV6_CSUM		NETIF_F_HW_CSUM
#endif

#ifndef NETIF_F_RXCSUM
#define NETIF_F_RXCSUM			0
#endif

#ifndef NETIF_F_RXHASH
#define NETIF_F_RXHASH			0
#endif

/* No ndo_set_features: hw_features management degrades to features. */
#ifndef NDO_SET_FEATURES_defined
#define hw_features			features
#endif

#ifndef NETDEV_FEATURES_defined
#define netdev_features_t		u32
#endif

#ifndef VLAN_GROUP_ARRAY_LEN
#define VLAN_GROUP_ARRAY_LEN		VLAN_N_VID
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
#define vlan_features			features
#endif

#ifndef DEFINE_DMA_UNMAP_ADDR
#define DEFINE_DMA_UNMAP_ADDR(bus)	dma_addr_t bus
#endif

/* Old-style multicast list traversal via the dev->mc_list chain. */
#ifndef netdev_for_each_mc_addr
#define netdev_for_each_mc_addr(h, n) 	for (h = (n)->mc_list; h; h = h->next)
#endif

#ifndef netdev_mc_count
#define netdev_mc_count(nd)		(nd->mc_count)
#endif

#ifdef netdev_uc_empty
#define NETDEV_UC_defined
#endif

/* Unknown ethtool port types reported as fibre on old kernels. */
#ifndef PORT_DA
#define PORT_DA				PORT_FIBRE
#endif

#ifndef PORT_OTHER
#define PORT_OTHER			PORT_FIBRE
#endif

/* ethtool link-mode bits that older headers lack; bit positions match the
 * upstream ethtool.h values.
 */
#ifndef SUPPORTED_Backplane
#define SUPPORTED_Backplane             (1 << 16)
#endif

#ifndef SUPPORTED_1000baseKX_Full
#define SUPPORTED_1000baseKX_Full	(1 << 17)
#endif

#ifndef SUPPORTED_10000baseKX4_Full
#define SUPPORTED_10000baseKX4_Full     (1 << 18)
#endif

#ifndef SUPPORTED_10000baseKR_Full
#define SUPPORTED_10000baseKR_Full      (1 << 19)
#endif

#ifndef SUPPORTED_40000baseKR4_Full
#define SUPPORTED_40000baseKR4_Full	(1 << 23)
#endif

#ifndef SUPPORTED_40000baseCR4_Full
#define SUPPORTED_40000baseCR4_Full	(1 << 24)
#endif

#ifndef SUPPORTED_40000baseSR4_Full
#define SUPPORTED_40000baseSR4_Full	(1 << 25)
#endif

#ifndef SUPPORTED_40000baseLR4_Full
#define SUPPORTED_40000baseLR4_Full	(1 << 26)
#endif

#ifndef ADVERTISED_1000baseKX_Full
#define ADVERTISED_1000baseKX_Full	(1 << 17)
#endif

#ifndef ADVERTISED_10000baseKX4_Full
#define ADVERTISED_10000baseKX4_Full	(1 << 18)
#endif

#ifndef ADVERTISED_10000baseKR_Full
#define ADVERTISED_10000baseKR_Full	(1 << 19)
#endif

#ifndef ADVERTISED_40000baseKR4_Full
#define ADVERTISED_40000baseKR4_Full	(1 << 23)
#endif

/* When new mc-list macros were used in 2.6.35, dev_mc_list was dropped */
#ifdef DEV_MC_LIST_defined
#define DMI_ADDR			dmi_addr
#else
#define DMI_ADDR			addr
#define dev_mc_list			netdev_hw_addr
#endif /* dev_mc_list */

/* Without the speed_hi field, high speed bits fold into ->speed. */
#ifndef speed_hi
#define speed_hi			speed
#endif

#ifndef ETHTOOL_CMD_SPEED_SET_defined
/* Fallback setter: stores only the low 16 bits of @speed in ep->speed;
 * speeds above 65535 Mb/s are truncated on these kernels.
 */
static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep,
					__u32 speed)
{
	ep->speed = (__u16)speed;
}
#endif

#ifndef ETHTOOL_CMD_SPEED_defined
/* Fallback getter: reads ep->speed only (no speed_hi combination). */
static inline __u32 ethtool_cmd_speed(struct ethtool_cmd *ep)
{
	return ep->speed;
}
#endif

#ifndef PHYS_ID_STATE_defined
/* Stateful LED-identify API backport; values match upstream ethtool.h. */
enum ethtool_phys_id_state {
	ETHTOOL_ID_INACTIVE,
	ETHTOOL_ID_ACTIVE,
	ETHTOOL_ID_ON,
	ETHTOOL_ID_OFF
};
/* Route the new set_phys_id ethtool op onto the legacy phys_id hook. */
#define set_phys_id			phys_id
#define be_set_phys_id			be_phys_id
#endif /* PHYS_ID_STATE_defined */

#ifndef PCI_EXP_LNKCAP_SLS
#define  PCI_EXP_LNKCAP_SLS     0x0000000f /* Supported Link Speeds */
#endif

/* Clear any hardware-accel VLAN tag pending on a tx skb: newer kernels
 * keep it in skb->vlan_tci, 2.6.18-and-older in the tx-cookie magic.
 */
static inline void be_reset_skb_tx_vlan(struct sk_buff *skb)
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
	skb->vlan_tci = 0;
#else
	struct vlan_skb_tx_cookie *cookie;

	cookie = VLAN_TX_SKB_CB(skb);
	cookie->magic = 0;
#endif
}

#ifndef IS_ALIGNED
#define IS_ALIGNED(x, a)		(((x) & ((typeof(x))(a) - 1)) == 0)
#endif

#ifndef ACCESS_ONCE
#define ACCESS_ONCE(x)			(*(volatile typeof(x) *)&(x))
#endif

/* Insert a VLAN tag into @skb via the kernel's __vlan_put_tag(), papering
 * over the two historical signatures (with/without the protocol argument).
 * May return a different skb than was passed in.
 */
static inline struct sk_buff *__vlan_put_tag_fixed(struct sk_buff *skb,
						__be16 vlan_proto,
						ushort vlan_tag)
{
#ifdef VLAN_FUNCS_USES_PROTO
	struct sk_buff *new_skb = __vlan_put_tag(skb, vlan_proto, vlan_tag);
#else
	struct sk_buff *new_skb = __vlan_put_tag(skb, vlan_tag);
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
	/* On kernel versions < 2.6.27 the __vlan_put_tag() function
	 * distorts the network layer hdr pointer in the skb which
	 * affects the detection of UDP/TCP packets down the line in
	 * wrb_fill_hdr().This work-around sets it right.
	 */
	skb_set_network_header(new_skb, VLAN_ETH_HLEN);
#endif
	return new_skb;
}

#ifdef USE_NEW_VLAN_MODEL

/* Placeholder so vlan_group-typed parameters still compile. */
#if !defined(VLAN_GRP_defined)
struct vlan_group {
	char dummy;
};
#endif

/* New-model rx path: the vlan_group argument is unused; the tag is put
 * on the skb and the stack does the demux in netif_receive_skb().
 */
static inline int vlan_hwaccel_receive_skb_compat(struct sk_buff *skb,
						  struct vlan_group *grp,
						  u16 vlan_tci)
{
#ifdef VLAN_FUNCS_USES_PROTO
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
#else
	__vlan_hwaccel_put_tag(skb, vlan_tci);
#endif
	return netif_receive_skb(skb);
}

/* GRO flavour of the above: tag napi->skb, then hand it to GRO. */
static inline gro_result_t vlan_gro_frags_compat(struct napi_struct *napi,
						 struct vlan_group *grp,
						 unsigned int vlan_tci)
{
#ifdef VLAN_FUNCS_USES_PROTO
	__vlan_hwaccel_put_tag(napi->skb, htons(ETH_P_8021Q), vlan_tci);
#else
	__vlan_hwaccel_put_tag(napi->skb, vlan_tci);
#endif
	return napi_gro_frags(napi);
}

#define	vlan_hwaccel_receive_skb		vlan_hwaccel_receive_skb_compat
#define	vlan_gro_frags				vlan_gro_frags_compat
#define vlan_group_set_device(vg, v, d)		do {} while(0)

#endif /* USE_NEW_VLAN_MODEL */

/* Drop the protocol argument from the driver's vid add/remove handlers on
 * kernels whose VLAN callbacks do not take one.
 */
#ifndef VLAN_FUNCS_USES_PROTO
#define be_vlan_add_vid(netdev, proto, vid)	be_vlan_add_vid(netdev, vid)
#define be_vlan_rem_vid(netdev, proto, vid)	be_vlan_rem_vid(netdev, vid)
#endif

#ifndef SKB_FRAG_API_defined
/* Backports of the skb_frag accessor API: on old kernels frag->page is a
 * bare struct page pointer and offset/size are direct fields.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, frag->page, frag->page_offset + offset, size,
			    dir);
}

static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	skb_shinfo(skb)->frags[f].page = page;
}
#endif /* SKB_FRAG_API_define */

#ifndef SKB_FRAG_SIZE_defined
/* Fragment-size accessors for kernels where frag->size is a plain field. */
static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
        frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
        frag->size += delta;
}

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
        return frag->size;
}
#endif /* SKB_FRAG_SIZE_defined */

/* This API is broken in RHEL 6.3 due to half-baked back-porting. Additional
 * check needed to cover for Oracle UEK 6.3
 */
#if RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,3) || \
	LINUX_VERSION_CODE == KERNEL_VERSION(3, 0, 16) || \
	LINUX_VERSION_CODE == KERNEL_VERSION(3, 0, 36)
#define skb_frag_set_page(skb, f, p)	(skb_shinfo(skb)->frags[f].page = p)
#endif

#ifndef PCI_PHYSFN_defined
/* Backport of pci_physfn(): map a VF's pci_dev to its parent PF; a PF
 * (or a non-IOV kernel) maps to itself.
 */
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	if (dev->is_virtfn)
		dev = dev->physfn;
#endif

	return dev;
}
#endif /* PCI_PHYSFN_defined */

/******************************** SRIOV ************************************/
#ifndef PCI_SRIOV_GET_TOTALVFS_defined
int pci_sriov_get_totalvfs(struct pci_dev *pdev);
#endif

/* Half baked support for SRIOV in older kernels */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) && defined(CONFIG_PCI_IOV)
#define sriov_kernel				true
#else
#define sriov_kernel				false
#endif

#ifndef PCI_FLAGS_ASSIGNED_defined
/* No dev_flags field: alias it to an existing field so tests against
 * PCI_DEV_FLAGS_ASSIGNED (0) compile and always evaluate false.
 */
#define	PCI_DEV_FLAGS_ASSIGNED			0
#define dev_flags				class /* just for compiler */ 
#endif /* PCI_FLAGS_ASSIGNED_defined */

/* To avoid compilation errors */
#ifndef CONFIG_PCI_IOV
#define	pci_find_ext_capability(d, c)		(0)
#define is_virtfn				class
#define pci_enable_sriov(x, y)			(0)
#define	pci_disable_sriov(x)			do {} while(0)
#endif

#ifdef CONFIG_PCI_IOV
/* Implemented in a .c file; scans for VFs in the given state. */
int be_find_vfs(struct pci_dev *pdev, int vf_state);
#ifndef PCI_VFS_ASSIGNED_defined
int pci_vfs_assigned(struct pci_dev *pdev);
#endif

#ifndef PCI_NUM_VF_defined
int pci_num_vf(struct pci_dev *pdev);
#endif
#else
#define pci_vfs_assigned(x)			0
#define pci_num_vf(x)				0
#endif /* CONFIG_PCI_IOV */

#ifndef PCI_EXT_CAP_ID_SRIOV
#define PCI_EXT_CAP_ID_SRIOV			0
#define PCI_SRIOV_VF_OFFSET			0
#define	PCI_SRIOV_VF_STRIDE			0
#define	PCI_SRIOV_TOTAL_VF			0
#endif

#ifndef NDO_GET_STATS64_defined
/* No 64-bit stats API: fall back to net_device_stats and stub out the
 * u64_stats_sync seqcount operations.
 */
struct u64_stats_sync {
	unsigned dummy;
};
#define rtnl_link_stats64			net_device_stats
/* Dummy implementation; also, avoid warnings */
#define u64_stats_update_begin(x)		do {} while(0)
#define u64_stats_update_end(x)			do {} while(0)
#define u64_stats_fetch_begin_bh(x)		((x)->dummy)
#define u64_stats_fetch_retry_bh(x, y)		((x)->dummy != y)
#endif

#ifdef HLIST_ENTRY_IS_NEW
/* Remap the new 3.9-style hlist_for_each_entry_safe() (entry-typed cursor)
 * onto call sites written for the old 5-argument form; the old "pos"
 * argument is accepted but unused.
 */
#undef hlist_for_each_entry_safe
#define hlist_for_each_entry_safe(node, unused, n, head, member)	\
	for (unused = NULL,\
	     node = hlist_entry_safe((head)->first, typeof(*node), member);\
	     node && ({ n = node->member.next; 1; });                     \
	     node = hlist_entry_safe(n, typeof(*node), member))
#endif

#ifdef SKB_TRANSPORT_HDR_OLD
/* Old skbs keep the transport header pointer in the h union. */
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return ((skb)->h.raw);
}
#endif

#ifdef ICMP6_HDR_IS_OLD
/* Backport: derive the ICMPv6 header from the transport header. */
static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
{
	return (struct icmp6hdr *)skb_transport_header(skb);
}
#endif

#ifdef UDP_HDR_IS_OLD
/* Backport: derive the UDP header from the transport header. */
static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
{
	return (struct udphdr *)skb_transport_header(skb);
}
#endif

/* Implemented in a .c file; refreshes trans_start for tx queue @i. */
void be_update_xmit_trans_start(struct net_device *netdev, int i);

#ifndef DMA_SET_COHERENT_MASK_defined
/* Backport of dma_set_coherent_mask(): validate @mask against the device
 * and store it; returns -EIO when the mask is unsupported.
 */
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;

	return 0;
}
#endif /* DMA_SET_COHERENT_MASK_defined */

#ifndef DMA_SET_MASK_AND_COHERENT_defined
/* Backport of dma_set_mask_and_coherent(): set the streaming DMA mask
 * and, on success, mirror it onto the coherent mask.  A failure of the
 * coherent-mask call is deliberately not reported, matching the original
 * backport's behavior.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int err = dma_set_mask(dev, mask);

	if (err == 0)
		dma_set_coherent_mask(dev, mask);

	return err;
}
#endif /* DMA_SET_MASK_AND_COHERENT_defined */
#endif				/* BE_COMPAT_H */
