/*
 * This file is part of the Linux NIC driver for Emulex networking products.
 *
 * Copyright (C) 2005-2014 Emulex. All rights reserved.
 *
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 * linux-drivers@emulex.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE
 * EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/if_bridge.h>

#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE)
#include "if_be2net_netmap.h"
#endif

/* Module metadata. Note: MODULE_DEVICE_TABLE(pci, be_dev_ids) is emitted
 * once, after the be_dev_ids table definition below; the copy that used
 * to sit here referenced the table before its declaration and duplicated
 * the later one.
 */
MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
MODULE_INFO(supported, "external");

/* Module parameters. All are read-only through sysfs (S_IRUGO) and can
 * only be set at module load time.
 */
static ushort rss_on_mc = 0;
module_param(rss_on_mc, ushort, S_IRUGO);
MODULE_PARM_DESC(rss_on_mc, "Enable RSS in multi-channel functions with the "
		 "capability. Disabled by default.");

static uint tx_prio = 0;
module_param(tx_prio, uint, S_IRUGO);
MODULE_PARM_DESC(tx_prio, "Create priority based TX queues."
			  " Disabled by default");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX fragment buffer; only 2048/4096/8192 are meaningful */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of receive fragment buffer"
			       " - 2048 (default), 4096 or 8192");
static unsigned int gro = 1;
module_param(gro, uint, S_IRUGO);
MODULE_PARM_DESC(gro, "Enable or Disable GRO. Enabled by default");

static unsigned int emi_canceller = 0;
module_param(emi_canceller, uint, S_IRUGO);
MODULE_PARM_DESC(emi_canceller, "Enable or Disable EMI Canceller."
				" Disabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
//	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID7)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR
 * Human-readable names for the UE (unrecoverable error) low-word status
 * register; entry index i corresponds to bit i of the register.
 */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};

/* UE Status High CSR
 * Names for the UE high-word status register; entry index i corresponds
 * to bit i. Trailing "Unknown" entries pad the table to 32 bits.
 */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Release the DMA-coherent memory backing @q, if any was allocated.
 * Safe to call on a queue that was never allocated (va == NULL).
 */
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (!mem->va)
		return;

	pci_free_consistent(adapter->pdev, mem->size, mem->va, mem->dma);
	mem->va = NULL;	/* mark as freed to make double-free harmless */
}

/* Allocate and zero DMA-coherent memory for a queue of @len entries of
 * @entry_size bytes each, resetting the queue bookkeeping first.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
	if (!mem->va)
		return -ENOMEM;
	/* HW expects the ring memory zeroed before use */
	memset(mem->va, 0, mem->size);
	return 0;
}

/* Enable/disable host interrupts by flipping the HOSTINTR bit in the
 * membar control register in PCI config space. A write is issued only
 * when the current state differs from the requested one.
 */
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg;
	bool currently_on;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	currently_on = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	/* nothing to do if already in the requested state */
	if (currently_on == enable)
		return;

	if (enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

/* Enable/disable host interrupt delivery. The FW command is tried first;
 * if it fails, fall back to writing the PCI config register directly.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	/* On lancer interrupts can't be controlled via this register;
	 * also skip touching the HW after an EEH error was detected.
	 */
	if (lancer_chip(adapter) || adapter->eeh_error)
		return;

	if (be_cmd_intr_set(adapter, enable) != 0)
		be_reg_intr_set(adapter, enable);
}

/* Ring the RX queue doorbell: tell HW that @posted buffers were added
 * to RX queue @qid. The wmb() orders the buffer writes before the
 * doorbell write.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val;

	if (be_hw_error(adapter))
		return;

	val = (qid & DB_RQ_RING_ID_MASK) |
	      ((u32)posted << DB_RQ_NUM_POSTED_SHIFT);

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

/* Ring the TX queue doorbell. @posted is the number of WRB slots that
 * were filled in the TX queue (1 pkt may occupy several slots).
 * The wmb() orders the WRB writes before the doorbell write.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val;

	if (be_hw_error(adapter))
		return;

	val = (txo->q.id & DB_TXULP_RING_ID_MASK) |
	      ((posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT);

	wmb();
	ND("Ring DB for %d WRBs txq->head = %d", posted, txo->q.head);
	iowrite32(val, adapter->db + txo->db_offset);
}

/* Ring the event queue doorbell for @qid: acknowledge @num_popped
 * consumed events, optionally re-arm the EQ and/or clear the interrupt.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val;

	if (be_hw_error(adapter))
		return;

	val = (qid & DB_EQ_RING_ID_MASK) |
	      ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT) |
	      (1 << DB_EQ_EVNT_SHIFT) |
	      ((u32)num_popped << DB_EQ_NUM_POPPED_SHIFT);
	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;

	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

/* Ring the completion queue doorbell for @qid: acknowledge @num_popped
 * consumed completions and optionally re-arm the CQ.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val;

	if (be_hw_error(adapter))
		return;

	val = (qid & DB_CQ_RING_ID_MASK) |
	      ((qid & DB_CQ_RING_ID_EXT_MASK) << DB_CQ_RING_ID_EXT_MASK_SHIFT) |
	      ((u32)num_popped << DB_CQ_NUM_POPPED_SHIFT);
	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;

	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

/* ndo_set_mac_address handler: program a new MAC for this interface.
 * The new MAC is first added via PMAC_ADD, the old one deleted, and the
 * change is then confirmed by querying the FW for the active MAC —
 * netdev->dev_addr is only updated once the FW confirms the new MAC.
 * Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC */
	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* Return a pointer to the hw_stats section within the GET_STATS command
 * response buffer; the response layout (v0/v1/v2) depends on the chip
 * generation.
 */
static inline void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	void *va = adapter->stats_cmd.va;

	/* BE2 supports only V0 cmd */
	if (BE2_chip(adapter))
		return &((struct be_cmd_resp_get_stats_v0 *)va)->hw_stats;

	if (BE3_chip(adapter))
		return &((struct be_cmd_resp_get_stats_v1 *)va)->hw_stats;

	/* for skyhawk and others */
	return &((struct be_cmd_resp_get_stats_v2 *)va)->hw_stats;
}

/* Return a pointer to the erx sub-section of the hw_stats in the
 * GET_STATS response; the struct version follows the chip generation.
 */
static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	void *hw_stats = hw_stats_from_cmd(adapter);

	/* BE2 supports only V0 cmd */
	if (BE2_chip(adapter))
		return &((struct be_hw_stats_v0 *)hw_stats)->erx;

	if (BE3_chip(adapter))
		return &((struct be_hw_stats_v1 *)hw_stats)->erx;

	/* for skyhawk and others */
	return &((struct be_hw_stats_v2 *)hw_stats)->erx;
}

/* Copy the BE2 (v0 layout) HW stats from the FW response into
 * adapter->drv_stats, converting the response from LE to CPU byte order
 * in place first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 has no combined filtered counter; sum MAC- and VLAN-filtered */
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counters per physical port, not per function */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

/* Copy the BE3 (v1 layout) HW stats from the FW response into
 * adapter->drv_stats, converting the response from LE to CPU byte order
 * in place first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

/* Copy the Skyhawk-era (v2 layout) HW stats from the FW response into
 * adapter->drv_stats, converting the response from LE to CPU byte order
 * in place first.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

/* Copy the Lancer per-port (pport) stats from the FW response into
 * adapter->drv_stats. Lancer exposes 64-bit counters as _lo/_hi pairs;
 * only the low 32 bits are folded into the 32-bit driver stats here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* no combined filtered counter; sum MAC- and VLAN-filtered */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

/* Fold a 16-bit HW counter that wraps at 65535 into a 32-bit accumulator:
 * the high 16 bits of *acc count wrap-arounds, the low 16 bits track the
 * counter's current value.
 * Fix: parenthesize the helper-macro arguments (classic macro-hygiene
 * hazard) and #undef the short names so 'lo'/'hi' don't leak into the
 * rest of the translation unit.
 */
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			((x) & 0xFFFF)
#define hi(x)			((x) & 0xFFFF0000)
	/* val < previous low half => the HW counter wrapped since last read */
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
#undef lo
#undef hi
}

/* Record the per-RXQ "no fragments" drop counter reported by HW. */
static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo,
			       u32 erx_stat)
{
	if (BEx_chip(adapter)) {
		/* On BEx this erx HW counter actually wraps around after
		 * 65535; accumulate it into a 32-bit driver value.
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
	} else {
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	}
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			/* for skyhawk */
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

/* Fill the error-related fields of @stats from the driver's accumulated
 * HW error counters (rx_errors, length/crc/frame/fifo breakdowns).
 */
static void be_get_error_stats(struct be_adapter *adapter,
			       struct rtnl_link_stats64 *stats)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
}

#ifndef NDO_GET_STATS64_defined
/* Legacy ndo_get_stats handler (kernels without ndo_get_stats64):
 * aggregate the per-queue RX/TX counters into adapter->net_stats and
 * return it.
 */
static struct net_device_stats *be_get_stats(struct net_device *dev)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct net_device_stats *stats = &adapter->net_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	unsigned long rx_pkts = 0, rx_bytes = 0, mcast = 0, drops = 0;
	unsigned long tx_pkts = 0, tx_bytes = 0;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		rx_pkts += rx_stats(rxo)->rx_pkts;
		rx_bytes += rx_stats(rxo)->rx_bytes;
		mcast += rx_stats(rxo)->rx_mcast_pkts;
		drops += rx_stats(rxo)->rx_drops_no_skbs +
				rx_stats(rxo)->rx_drops_no_frags;
	}
	stats->rx_packets = rx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->multicast = mcast;
	stats->rx_dropped = drops;

	for_all_tx_queues(adapter, txo, i) {
		tx_pkts += tx_stats(txo)->tx_pkts;
		tx_bytes += tx_stats(txo)->tx_bytes;
	}
	stats->tx_packets = tx_pkts;
	stats->tx_bytes = tx_bytes;

	be_get_error_stats(adapter, stats);

	return stats;
}

#else
/* ndo_get_stats64 handler: aggregate per-queue RX/TX counters into
 * @stats, reading the pkt/byte pairs under u64_stats seqcount retry
 * loops so a concurrent datapath update can't be observed torn.
 * NOTE(review): multicast and drop counters are read outside the retry
 * loop — presumably acceptable as independent single counters; confirm
 * torn reads are tolerable on 32-bit.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *dev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
				rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	be_get_error_stats(adapter, stats);
	return stats;
}
#endif

/* Propagate a link state change to the net stack (carrier on/off) and
 * log it.
 */
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!link_status) {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
		return;
	}

	netif_carrier_on(netdev);
	dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
}

/* Account one TX request against the queue's stats; a GSO skb counts as
 * gso_segs packets. Updates are bracketed by the u64_stats seqcount so
 * 64-bit readers on 32-bit systems see consistent values.
 */
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb: one header WRB, one for
 * the linear region (if non-empty), plus one per page fragment.
 */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	u32 cnt = 1 + skb_shinfo(skb)->nr_frags;	/* +1 for the header wrb */

	if (skb_headlen(skb))
		cnt++;
	return cnt;
}

/* Fill one TX fragment WRB with the DMA address (split into hi/lo 32-bit
 * halves) and length; frag_len is masked to the HW field width and the
 * reserved field must be zeroed.
 */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

/* Detect an IPv6 packet carrying the extension-header pattern that
 * triggers a HW erratum (see be_ipv6_tx_stall_chk): an IPv6 packet whose
 * next header is neither TCP nor UDP and whose first option header has
 * hdrlen == 0xff. Returns true for such offending packets.
 */
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *) (skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

/* True for BE3 chips with ASIC revision below 2 (A0/A1 steppings) */
static int be3_A1(struct be_adapter *adapter)
{
	if (!BE3_chip(adapter))
		return 0;
	return adapter->asic_rev < 2;
}

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return be3_A1(adapter) &&
		be_vlan_tag_chk(adapter, skb) &&
		be_ipv6_exthdr_check(skb);
}

/* Fill the TX header WRB for @skb: LSO/checksum offload flags, VLAN tag,
 * total WRB count and payload length. @skip_hw_vlan requests the FW to
 * skip HW VLAN insertion; @os2bmc marks the frame for the management
 * (BMC) path.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan, bool os2bmc)
{
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);

		/* Lancer requires a minimum MSS; clamp smaller gso sizes */
		if (lancer_chip(adapter) &&
			skb_shinfo(skb)->gso_size < LANCER_TSO_MIN_MSS)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
						hdr, LANCER_TSO_MIN_MSS);
		else
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
					hdr, skb_shinfo(skb)->gso_size);

		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);

	/* To skip HW VLAN tagging: evt = 1, compl = 0
	 * Otherwise, the evt bit is set while ringing DB
	 */
	if (skip_hw_vlan)
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	if (os2bmc)
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, mgmt, hdr, 1);
}

/* DMA-unmap one TX fragment WRB. The WRB is converted back from LE to
 * CPU order to read the address/length it was built with; a zero
 * frag_len means nothing was mapped (e.g. the header WRB).
 * @unmap_single selects dma_unmap_single (linear data) vs dma_unmap_page
 * (page fragments) — must match how the fragment was mapped.
 */
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

/* Build the WRB chain for @skb in @txq: a header WRB, then one WRB for
 * the linear region (if any) and one per page fragment, all converted to
 * LE for the HW. Returns the number of WRBs consumed (incl. header).
 * NOTE(review): the dma_map_single/skb_frag_dma_map results are not
 * checked with dma_mapping_error(); a failed mapping would be handed to
 * HW. Confirm whether the caller handles this or a fix is needed.
 */
static u32 make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, bool skip_hw_vlan, bool os2bmc)
{
	dma_addr_t busaddr;
	u32 i, copied = 0, wrb_cnt;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;

	/* reserve the head slot for the header WRB; it is filled last,
	 * once the total copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	wrb_cnt = skb_wrb_cnt(skb);

	/* linear (non-paged) part of the skb, if present */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, copied, skip_hw_vlan, os2bmc);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return wrb_cnt;
}

/* Is BE in a multi-channel mode, i.e. mc_type set beyond MC_NONE */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return adapter->mc_type > MC_NONE;
}

/* Software-insert VLAN tag(s) into @skb (moving the tag out of skb
 * metadata into the payload), applying pvid/QnQ workarounds. May return
 * a different skb (share-check/retag can reallocate) or NULL on failure;
 * callers must use the returned pointer. *skip_hw_vlan is set when the
 * FW must be told to skip its own VLAN insertion.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
				    struct sk_buff *skb, bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	/* pull the tag out of skb metadata; it is re-inserted in-line below */
	if (vlan_tx_tag_present(skb)) {
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		be_reset_skb_tx_vlan(skb);
	}

	if (adapter->qnq_async_evt && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag_fixed(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
	}
	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag_fixed(skb, htons(ETH_P_8021Q), vlan_tag);
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

/* Mac hash key function: derive a 6-bit bucket index from low-order
 * bits of MAC octets 1-3.
 */
static inline u8 mac_to_idx(u8 mac_addr[])
{
	u8 idx;

	idx = mac_addr[1] & 7;
	idx |= (mac_addr[2] & 3) << 3;
	idx |= (mac_addr[3] & 1) << 5;
	return idx;
}

/* Extract the VLAN ID for @skb: prefer an in-line 802.1Q header in the
 * packet data; otherwise fall back to the tag in skb metadata. Returns 0
 * if the packet carries no VLAN tag.
 */
static u16 be_get_vlan_tag(struct be_adapter *adapter, struct sk_buff *skb)
{
	u16 vlan_tag = 0;
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)(skb->data);

	if (veh->h_vlan_proto == htons(ETH_P_8021Q))
		vlan_tag = ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK;
	else if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	return vlan_tag;
}

/* Unlink @node from the MAC hash under mac_hash_lock and free it.
 * The kfree happens outside the lock deliberately.
 */
static inline void be_del_mac_node(struct be_adapter *adapter,
				   struct mac_node *node)
{
	spin_lock_bh(&adapter->mac_hash_lock);
	hlist_del(&node->list);
	spin_unlock_bh(&adapter->mac_hash_lock);
	kfree(node);
}

/* Worker function to unlearn macs that are learnt.
 * Deletes the node's pmac from the adapter; on success re-enables MAC
 * learning (a previous add may have disabled it on resource exhaustion).
 * The node and the work item are freed here — the caller has already
 * unlinked the node from the hash.
 */
static void be_del_mac_worker(struct work_struct *work)
{
	int status;
	struct mac_work *work_unlearn = container_of(work,
						    struct mac_work,
						    mac_addr_work);
	struct mac_node *node = work_unlearn->node;
	struct be_adapter *adapter = work_unlearn->adapter;

	status = be_cmd_pmac_del(adapter,
				 adapter->if_handle,
				 node->pmac_id, 0);

	if (!status)
		adapter->flags |= BE_FLAGS_MAC_LEARNING_ENABLED;

	kfree(node);
	kfree(work_unlearn);
}

/* Worker function to program macs that are learnt.
 * Adds the node's MAC as a pmac on the adapter, storing the resulting
 * pmac_id in the node. If the FW reports resource exhaustion, MAC
 * learning is turned off until capacity is freed. Only the work item is
 * freed here; the node stays linked in the hash.
 */
static void be_add_mac_worker(struct work_struct *work)
{
	struct mac_work *work_lrn = container_of(work,
					       struct mac_work, mac_addr_work);
	struct be_adapter *adapter = work_lrn->adapter;
	struct mac_node *node = work_lrn->node;
	int status = 0;

	status = be_cmd_pmac_add(adapter, (u8 *)node->mac,
				 adapter->if_handle,
				 &node->pmac_id, 0);

	if (extd_status(status) == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
		adapter->flags &= ~BE_FLAGS_MAC_LEARNING_ENABLED;

	kfree(work_lrn);
}

/* Keep checking each mac's age for every second and unlearn him if not
 * active for more than 5 minutes.
 * Walks every hash bucket; entries older than EVICT_TIMEOUT have their
 * pmac deleted synchronously and, on success, are removed and freed.
 * NOTE(review): the hash is walked here without taking mac_hash_lock
 * (be_del_mac_node takes it only around the unlink) — confirm callers
 * serialize against the datapath learn/unlearn paths.
 */
static void be_evict_mac(struct be_adapter *adapter)
{
	struct mac_node *node = NULL;
	struct hlist_node *pos, *n;
	struct hlist_head *head;
	unsigned int age_msec, i;
	unsigned long curr_time;
	int status;

	for (i = 0; i < BE_MAX_MAC; i++) {
		head = &adapter->machash.head[i];
		hlist_for_each_entry_safe(node, pos, n, head, list) {
			curr_time = jiffies;
			/* skip entries stamped in the future (just learnt) */
			if (time_before(curr_time, node->jiffies))
				continue;
			age_msec = jiffies_to_msecs(curr_time - \
						    node->jiffies);
			if (age_msec >= EVICT_TIMEOUT) {
				status = be_cmd_pmac_del(adapter,
							 adapter->if_handle,
							 node->pmac_id, 0);
				if (!status) {
					be_del_mac_node(adapter, node);
					adapter->flags |=
						BE_FLAGS_MAC_LEARNING_ENABLED;
				}
			}
		}
	}
}

/* Queue a work item to unlearn (delete) @node's pmac asynchronously.
 * Returns false if the work item could not be allocated.
 */
static bool queue_del_mac(struct be_adapter *adapter, struct mac_node *node)
{
	struct mac_work *work;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return false;

	work->adapter = adapter;
	work->node = node;
	INIT_WORK(&work->mac_addr_work, be_del_mac_worker);
	queue_work(adapter->workq, &work->mac_addr_work);
	return true;
}

/* Unlearn the MAC in @skb's source address: unlink its node from the
 * hash and queue the FW delete to the work queue.
 *
 * Fix: the original jumped to the exit label from inside the loop and
 * skipped spin_unlock_bh(), leaving mac_hash_lock held (and BHs disabled)
 * whenever a matching node was found.  The unused vlan_tag local is also
 * dropped.
 */
static void be_unlearn_mac(struct be_adapter *adapter,
				 struct sk_buff *skb)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct hlist_node *pos, *n;
	struct hlist_head *head;
	struct mac_node *node = NULL;
	u8 hashidx;

	hashidx = mac_to_idx(ethhdr->h_source);
	head = &adapter->machash.head[hashidx];

	spin_lock_bh(&adapter->mac_hash_lock);
	hlist_for_each_entry_safe(node, pos, n, head, list) {
		if (be_mac_vlan_matches(node, ethhdr)) {
			hlist_del(&node->list);
			/* queue_del_mac() owns the node on success; on
			 * allocation failure put it back so it is aged
			 * out later by be_evict_mac().
			 */
			if (!queue_del_mac(adapter, node))
				hlist_add_head(&node->list, head);
			break;
		}
	}
	spin_unlock_bh(&adapter->mac_hash_lock);
}

/* Look up the skb's source MAC in @head.  If found, refresh its age
 * stamp and return BE_MAC_HIT.  Otherwise allocate a node, link it into
 * the bucket and return BE_MAC_MISS; on allocation failure *node stays
 * NULL (caller checks for that).
 * NOTE(review): declared bool but returns BE_MAC_HIT/BE_MAC_MISS —
 * presumably these are true/false aliases; confirm in be.h.
 */
static bool be_check_add_mac_node(struct be_adapter *adapter,
				  struct sk_buff *skb,
				  struct mac_node **node,
				  struct hlist_head *head)
{
	struct hlist_node *pos, *n;
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	u16 vlan_tag = 0;

	vlan_tag = be_get_vlan_tag(adapter, skb);
	spin_lock_bh(&adapter->mac_hash_lock);
	hlist_for_each_entry_safe((*node), pos, n, head, list) {
		if (be_mac_matches((*node), ethhdr)) {
			/* known MAC: just refresh its last-seen time */
			(*node)->jiffies = jiffies;
			spin_unlock_bh(&adapter->mac_hash_lock);
			return BE_MAC_HIT;
		}
	}
	/* atomic context (datapath): no sleeping allocation allowed */
	*node = kzalloc(sizeof(struct mac_node), GFP_ATOMIC);
	if (!*node)
		goto err;

	memcpy((*node)->mac, ethhdr->h_source, ETH_ALEN);
	(*node)->jiffies = jiffies;
	(*node)->vlan_tag = vlan_tag;
	/* Queue new macs to be programmed to a work queue */
	hlist_add_head(&(*node)->list, head);
err:
	spin_unlock_bh(&adapter->mac_hash_lock);
	return BE_MAC_MISS;
}

/* Defer programming of a newly learnt @node to the adapter work queue.
 * Returns false if the work item cannot be allocated.
 */
static bool queue_add_mac(struct be_adapter *adapter, struct mac_node *node)
{
	struct mac_work *wk = kzalloc(sizeof(*wk), GFP_ATOMIC);

	if (!wk)
		return false;

	wk->adapter = adapter;
	wk->node = node;
	INIT_WORK(&wk->mac_addr_work, be_add_mac_worker);
	queue_work(adapter->workq, &wk->mac_addr_work);
	return true;
}

/* Hypervisor OSes configure the port in promiscuous (MAC and VLAN)
 * mode and they do not program any host VLANs in the ASIC. So,
 * when a packet arrives in the port with a host VLAN ID, the ASIC
 * cannot determine which ring to send the packet to, so sends the
 * packet to all the PFs. Learning mac when the packet arrives and
 * programming mac if it is not programmed already will solve it.
 */
static void be_learn_mac(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	struct hlist_head *head;
	struct mac_node *node = NULL;
	u8 hashidx = 0;
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;

	/* Never learn our own (primary) MAC */
	if (!memcmp((char *)adapter->netdev->dev_addr, (char *)ethhdr->h_source,
		    ETH_ALEN))
		goto done;

	hashidx = mac_to_idx(ethhdr->h_source);
	head = &adapter->machash.head[hashidx];

	/* Done if already known (hit) or node allocation failed (miss
	 * with node == NULL)
	 */
	if (be_check_add_mac_node(adapter, skb, &node, head) ||
	    !node)
		goto done;

	if (queue_add_mac(adapter, node))
		goto done;

	/* Could not queue the work item; drop the freshly added node */
	be_del_mac_node(adapter, node);
done:
	return;
}

/* Apply BEx/Lancer-specific Tx workarounds to @skb.  May trim, re-tag,
 * or drop the packet.  Returns the (possibly modified) skb, or NULL if
 * it was dropped/consumed; *skip_hw_vlan is set when the HW must not
 * insert a VLAN tag.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb, bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	struct iphdr *ip = NULL;
	unsigned int eth_hdr_len;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
			VLAN_ETH_HLEN : ETH_HLEN;

	/* IPv6 TSO requests with extension hdrs are a problem to the HW.
	 * Just drop them.
	 */
	if (unlikely((skb_shinfo(skb)->gso_segs > 1) &&
		skb_shinfo(skb)->gso_size && is_ipv6_ext_hdr(skb)))
		goto tx_drop;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || be_vlan_tag_chk(adapter, skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* trim away the padding so len matches the IP tot_len */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     !adapter->qnq_async_evt))
		goto tx_drop;

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calcuate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (!*skip_hw_vlan && (skb->ip_summed != CHECKSUM_PARTIAL) &&
	    be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			return NULL;
	}

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (!*skip_hw_vlan && be_ipv6_tx_stall_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			return NULL;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}

/* Apply chip-independent Tx workarounds, then the BEx/Lancer-specific
 * ones.  Returns the (possibly modified) skb, or NULL if the packet was
 * dropped/consumed.
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb, bool *skip_hw_vlan)
{
	unsigned int eth_hdr_len;

	/* Don't allow non-TSO packets longer than MTU */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
			VLAN_ETH_HLEN : ETH_HLEN;
	if (!skb_is_gso(skb) &&
		((skb->len - eth_hdr_len) > adapter->netdev->mtu))
		goto tx_drop;

	/* Lancer, SH ASIC has a bug wherein Packets that are 32 bytes or less
	 * may cause a transmit stall on that port. So the work-around is to
	 * pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		/* skb_padto() zero-fills but does not update len itself */
		if (skb_padto(skb, 36))
			return NULL;
		skb->len = 36;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
		if (!skb)
			return NULL;
	}

	/* The stack can send us skbs with length more than 65535
	 * BE cannot handle such requests. Hack the extra data and drop it.
	 */
	if (skb->len > 65535) {
		int err = __pskb_trim(skb, 65535);
		BUG_ON(err);
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}

/* Flush the batched WRBs of @txo to the HW: mark the last request
 * eventable, pad to an even WRB count on BE chips, then ring the TX
 * doorbell.  Resets the pending-WRB batch counter.
 */
static void be_xmit_finish(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_hdr);
	/* if nothing was batched, notify just the last request's WRBs */
	u16 wrb_cnt = txo->pend_wrb_cnt ? txo->pend_wrb_cnt : txo->last_wrb_cnt;

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (wrb_cnt & 1)) {
		wrb_fill(queue_head_node(txq), 0, 0);
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		wrb_cnt++;
		/* account the dummy WRB in the last request's header too */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, wrb_cnt);
 	be_update_xmit_trans_start(adapter->netdev, txo->idx);
	txo->pend_wrb_cnt = 0;
}

/* Decide whether a copy of *skb must also be sent to the BMC (OS-to-BMC
 * pass-through).  Sets *os2bmc and, when true, replaces *skb with a
 * VLAN-inlined copy.  Returns the final *os2bmc value.
 * Only multicast frames matching the enabled BMC filters (mc/bc/ARP,
 * ICMPv6 RA/NA, DHCP/NetBIOS/DHCPv6-RAS UDP ports) qualify.
 */
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb, bool *os2bmc)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;

	/* feature off, or this is already the second (BMC) submission */
	if (!is_be_os2bmc_enabled(adapter) || *os2bmc) {
		*os2bmc = false;
		goto done;
	}

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		*os2bmc = true;
		goto done;
	}

	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;
		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				*os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				*os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));
		/* NOTE(review): udp->dest is big-endian on the wire; this
		 * assumes the *_PORT constants are defined in network
		 * byte order — confirm in be.h.
		 */
		switch (udp->dest) {
		case DHCP_CLIENT_PORT:
			*os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			*os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			*os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			*os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* the BMC copy always carries the VLAN tag inline */
	if (*os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return *os2bmc;
}

/* ndo_start_xmit handler.  Builds WRBs for the skb, batches doorbell
 * rings, and — when OS2BMC applies — loops back to xmit_start to submit
 * a second copy of the frame destined to the BMC.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt;
	bool skip_hw_vlan = false, os2bmc = false;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	/* BHs are already disabled by transmitting context */
	spin_lock(&txo->tx_lock);

xmit_start:
	txo->last_hdr = txq->head;
	BUG_ON(txo->sent_skb_list[txo->last_hdr]);

	wrb_cnt = make_tx_wrbs(adapter, txq, skb, skip_hw_vlan, os2bmc);

	txo->sent_skb_list[txo->last_hdr] = skb;
	atomic_add(wrb_cnt, &txq->used);
	txo->last_wrb_cnt = wrb_cnt;

	/* no room for another max-fragment request; stop the subqueue */
	if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
		netif_stop_subqueue(netdev, txo->idx);
		tx_stats(txo)->tx_stops++;
	}

	/* When there are enough pending compls, delay ringing DB */
	if (txo->pend_wrb_cnt || atomic_read(&txq->used) > START_BATCHING_WM)
		txo->pend_wrb_cnt += wrb_cnt;

	/* if os2bmc is not enabled or if the pkt is destined to bmc, do nothing
	 */
	if (be_send_pkt_to_bmc(adapter, &skb, &os2bmc))
		skb_get(skb);	/* extra ref for the second submission */

	/* When there are enough pending transmits, flush them */
	if (txo->pend_wrb_cnt == 0 || txo->pend_wrb_cnt > MAX_BATCH_SIZE)
		be_xmit_finish(adapter, txo);

	be_tx_stats_update(txo, skb);

	/* submit the BMC copy; os2bmc==true makes the next
	 * be_send_pkt_to_bmc() call clear the flag, ending the loop
	 */
	if (os2bmc)
		goto xmit_start;

	if (be_is_mac_learning_enabled(adapter))
		be_learn_mac(adapter, skb);

	spin_unlock(&txo->tx_lock);

	return NETDEV_TX_OK;
}

/* ndo_change_mtu handler: validate the requested MTU against the
 * device limits and apply it.
 */
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int max_mtu = BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);

	if (new_mtu < BE_MIN_MTU || new_mtu > max_mtu) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU, max_mtu);
		return -EINVAL;
	}

	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 * NOTE(review): the table-build loop can write up to be_max_vlans()
 * entries into vids[BE_NUM_VLANS_SUPPORTED]; this assumes
 * be_max_vlans(adapter) <= BE_NUM_VLANS_SUPPORTED — confirm.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to change the VLAN state if the I/F is in promiscous */
	if(adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (extd_status(status) == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(dev, "Setting HW VLAN filtering failed\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(dev,
					 "Disabling VLAN Promiscuous mode\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* already promiscuous; nothing more to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	dev_warn(dev, "Exhausted VLAN HW filters\n");
	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(dev, "Could not enable VLAN Promiscuous mode\n");
	return status;
}

#ifndef USE_NEW_VLAN_MODEL
/* Legacy VLAN model: cache the stack's vlan_group for Rx acceleration */
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter;

	adapter = netdev_priv(netdev);
	adapter->vlan_grp = grp;
}
#endif

/* ndo_vlan_rx_add_vid handler: record @vid and reprogram the HW VLAN
 * table.  Bookkeeping is rolled back if programming fails.
 */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	/* Nothing to do if this VLAN is already programmed */
	if (adapter->vlan_tag[vid])
		return 0;

	adapter->vlan_tag[vid] = 1;
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		/* roll back on failure */
		adapter->vlan_tag[vid] = 0;
		adapter->vlans_added--;
	}
	return status;
}

/* ndo_vlan_rx_kill_vid handler: drop @vid and reprogram the HW VLAN
 * table; the tag is restored if reprogramming fails.
 */
static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* VID-0 packets are always received by Lancer; nothing was added */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	vlan_group_set_device(adapter->vlan_grp, vid, NULL);
	adapter->vlan_tag[vid] = 0;

	status = be_vid_config(adapter);
	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;	/* restore on failure */

	return status;
}

/* Leave promiscuous mode: clear the cached state and tell the FW */
static void be_clear_promisc(struct be_adapter *adapter)
{
	adapter->flags &= ~(BE_FLAGS_MCAST_PROMISC | BE_FLAGS_VLAN_PROMISC);
	adapter->promiscuous = false;

	be_cmd_rx_filter(adapter, BE_FLAGS_PROMISC, OFF);
}

/* ndo_set_rx_mode handler: sync promiscuous/multicast/unicast filter
 * state to the FW, falling back to (multicast) promiscuous mode when
 * HW filter resources are exhausted.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, BE_FLAGS_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI || 
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		goto set_mcast_promisc;
	}

#ifdef NETDEV_UC_defined
	/* Re-sync the secondary unicast MAC list with the stack's */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* remove all previously programmed secondary MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many UC addrs for the HW: go fully promiscuous */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, BE_FLAGS_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}
#endif
	status = be_cmd_rx_filter(adapter, BE_FLAGS_MCAST, ON);
	if (!status) {
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC) {
			dev_info(dev, "Re-Enabling HW multicast filtering\n");
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		}
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	dev_info(dev, "Exhausted multicast HW filters. ");
	status = be_cmd_rx_filter(adapter, BE_FLAGS_MCAST_PROMISC, ON);
	if (!status) {
		dev_info(dev, "Disabling HW multicast filtering\n");
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
	}
done:
	return;
}

#ifdef NDO_VF_CFG_defined
/* ndo_set_vf_mac handler: program @mac as VF @vf's address.  BEx chips
 * need an explicit pmac delete/add cycle; newer chips take a set-mac
 * command.  The cached VF MAC is updated only on success.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || !is_valid_ether_addr(mac))
		return -EINVAL;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);
		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (!status)
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	else
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);

	return status;
}

/* Refresh the cached Tx rate of VF @vf from the FW link status */
static void be_query_vf_rate(struct be_adapter *adapter, int vf)
{
	u16 speed;
	int status;

	status = be_cmd_link_status_query(adapter, &speed, NULL, vf + 1);
	if (!status)
		adapter->vf_cfg[vf].tx_rate = speed;
}

/* ndo_get_vf_config handler: report VF @vf's MAC, VLAN/QoS, Tx rate
 * (refreshed from FW) and, where supported, link state.
 */
static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;
	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* refresh the cached link speed before reporting it */
	be_query_vf_rate(adapter, vf);

	vi->vf = vf;
	vi->tx_rate = cfg->tx_rate;
	vi->vlan = cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, cfg->mac_addr, ETH_ALEN);
#ifdef NDO_VF_LINK_STATE_defined
	vi->linkstate = cfg->linkstate;
#endif /*NDO_VF_LINK_STATE_defined*/
	return 0;
}

/*
 * Entry point to configure vlan behavior for a VF.
 * 1. By default a VF has FILTMGMT privilege.
 * 2. It may or may not have Transparent Tagging enabled.
 * 3. To disable the current Transparent Tagging for a VF:
 *   3a. run the last iproute command with vlan set to 0.
 *   3b. programing vid 0xFFFF will disable Transparent Tagging in ARM/ASIC
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	/* vlan is a 12-bit VID, qos a 3-bit priority */
	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		/* fold priority into the upper bits of the tag */
		vlan |= qos << VLAN_PRIO_SHIFT;
		/* only touch the hsw config if the tag actually changes */
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (!status)
		vf_cfg->vlan_tag = vlan;
	else
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 link_speed;
	u8 lnk_status;
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_link_status_query(adapter, &link_speed, &lnk_status, 0);
	if (status)
		goto err;

	if ((lnk_status & LINK_STATUS_MASK) == LINK_DOWN)
		goto err;

	if (rate < 100 || rate > link_speed) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and %d Mbps\n",
			link_speed);
		return -EINVAL;
	}

	if (BE3_chip(adapter))
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_profile_config(adapter,
						   (rate*100)/link_speed,
						   vf + 1);

	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = rate;
	return 0;

err:
	dev_err(&adapter->pdev->dev,
		"Set Tx Rate %d on VF %d failed\n", rate, vf);
	return status;
}
#endif /* NDO_VF_CFG_defined */

#ifdef NDO_VF_LINK_STATE_defined
/* ndo_set_vf_link_state handler: AUTO tracks the physical link; ENABLE
 * and DISABLE force the VF link up or down regardless of it.
 */
static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	bool track_plink = (link_state == IFLA_VF_LINK_STATE_AUTO);
	bool force_link_up = 0;
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;
	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (!track_plink)
		force_link_up = (link_state == IFLA_VF_LINK_STATE_ENABLE);

	status = be_cmd_set_logical_link_config(adapter, track_plink,
						force_link_up, vf+1);
	if (!status)
		adapter->vf_cfg[vf].linkstate = link_state;
	return status;
}
#endif /*NDO_VF_LINK_STATE_defined*/

/* Snapshot the packet counters and timestamp used by the next adaptive
 * interrupt-coalescing (EQD) calculation.
 */
static void be_aic_update(struct be_aic_obj *aic, u64 rxpkts, u64 txpkts,
			  ulong now)
{
	aic->jiffies = now;
	aic->rx_pkts_prev = rxpkts;
	aic->tx_reqs_prev = txpkts;
}

/* Adaptive interrupt coalescing: derive a new event-queue delay (EQD)
 * for each EQ from the rx+tx packets-per-second since the last run, and
 * push all changed EQDs to the FW in one command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rxpkts, txpkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* adaptive mode off: force the ethtool-set EQD */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* seqcount-protected snapshot of the per-queue counters */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
			rxpkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&txo->stats.sync);
			txpkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));


		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rxpkts < aic->rx_pkts_prev || txpkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rxpkts, txpkts, now);
			continue;
		}

		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rxpkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(txpkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		/* very low traffic: no coalescing delay at all */
		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rxpkts, txpkts, now);
modify_eqd:
		/* only send EQDs that actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;	
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

/* Account one Rx completion in the per-queue stats (under the u64
 * stats seqlock).
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_pkts++;
	stats->rx_bytes += rxcp->pkt_size;
	if (rxcp->err)
		stats->rx_compl_err++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

/* Consume the page-info entry at the Rx queue tail: unmap the backing
 * page on its last use, advance the tail and return the entry.  A NULL
 * page here means the producer/consumer indices are corrupt — fatal.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	if (!rx_page_info->page) {
		/* dump queue state before dying to aid debugging */
		printk(KERN_EMERG "curr_idx=%d prev_dix=%d rxq->head=%d\n",
			frag_idx, rxo->prev_frag_idx, rxq->head);
		BUG_ON(!rx_page_info->page);
	}

	/* a big page is shared by several frags; unmap on the last one */
	if (rx_page_info->last_page_user) {
		pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);
		rx_page_info->last_page_user = false;
	}

	rxo->prev_frag_idx = frag_idx;

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throwaway the data in the Rx completion: release every backing page
 * and clear the page-info slots so they can be reposted.
 */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	u16 frag;

	for (frag = 0; frag < rxcp->num_rcvd; frag++) {
		struct be_rx_page_info *pinfo = get_rx_page_info(rxo);

		put_page(pinfo->page);
		memset(pinfo, 0, sizeof(*pinfo));
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame indicated by rxcp.
 * The header (up to BE_HDR_LEN) is copied into the linear area; the rest
 * of the payload is attached as page fragments, coalescing consecutive
 * frags that share a physical page.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct be_adapter *adapter = rxo->adapter;
	u16 i, j, hdr_len, curr_frag_len, remaining;
	skb_frag_t *frag;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, adapter->rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* rest of the first frag stays in the page as frag[0] */
		frag = &skb_shinfo(skb)->frags[0];
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		frag->page_offset = page_info->page_offset + hdr_len;
		skb_frag_size_set(frag, curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += adapter->rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= adapter->rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, adapter->rx_frag_size);
		frag = &skb_shinfo(skb)->frags[j];

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			frag++;
			/* NOTE(review): should this be j >= MAX_SKB_FRAGS?
			 * j == MAX_SKB_FRAGS would index one past frags[];
			 * presumably num_rcvd can never drive j that far.
			 */
			BUG_ON(j > MAX_SKB_FRAGS);
			skb_frag_set_page(skb, j, page_info->page);
			frag->page_offset = page_info->page_offset;
			skb_frag_size_set(frag, 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as previous frag; drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(frag, curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += adapter->rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
}

/* Process the RX completion indicated by rxcp when LRO is disabled:
 * build an skb, set checksum/rxhash metadata and hand it to the stack
 * (via the VLAN-accel path when the frame carried a tag).
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely(adapter->rx_csum && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;

#ifndef NULL_VLAN_GRP_SAFE
	/* no vlan_grp registered: inline the tag instead of hw-accel */
	if (rxcp->vlanf && !adapter->vlan_grp) {
		__vlan_put_tag(skb, rxcp->vlan_tag);
		rxcp->vlanf = 0;
	}
#endif

	skb->protocol = eth_type_trans(skb, adapter->netdev);
	skb->dev = adapter->netdev;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
#ifdef RXHASH_defined
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;
#endif

	/*
	 * Unlearn the mac that is already learnt just in case if
	 * the mac moves out of the machine.
	 */
	if (adapter->flags & BE_FLAGS_MAC_LEARNING_INITIALIZED)
		be_unlearn_mac(adapter, skb);

	if (rxcp->vlanf)
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					 rxcp->vlan_tag);
	else
		netif_receive_skb(skb);

	return;
}

/* Process the RX completion indicated by rxcp when LRO is enabled:
 * gather the completion's pages into a frag array and feed it to the
 * inet_lro manager (only built when GRO is unavailable).
 */
static void be_rx_compl_process_lro(struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
#ifndef GRO_defined
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len, i, j;

	remaining = rxcp->pkt_size;
	/* j is u16, so j = -1 wraps to 0xffff and the first j++ lands on 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, adapter->rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			rx_frags[j].page = page_info->page;
			rx_frags[j].page_offset = page_info->page_offset;
			rx_frags[j].size = 0;
		} else {
			/* same page as previous frag; drop the extra ref */
			put_page(page_info->page);
		}
		rx_frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	if (likely(!rxcp->vlanf)) {
		lro_receive_frags(&rxo->lro_mgr, rx_frags, rxcp->pkt_size,
				rxcp->pkt_size, NULL, 0);
	} else {
		lro_vlan_hwaccel_receive_frags(&rxo->lro_mgr, rx_frags,
			rxcp->pkt_size, rxcp->pkt_size, adapter->vlan_grp,
			rxcp->vlan_tag, NULL, 0);
	}
#endif
	return;
}

/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the completion's pages to a napi frags skb and push it through
 * the GRO engine (VLAN-accel variant when the frame carried a tag).
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
#ifdef GRO_defined
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len, i, j;
	skb_frag_t *frag = NULL;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16, so j = -1 wraps to 0xffff and the first j++ lands on 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, adapter->rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			frag = &skb_shinfo(skb)->frags[j];
			skb_frag_set_page(skb, j, page_info->page);
			frag->page_offset = page_info->page_offset;
			skb_frag_size_set(frag, 0);
		} else {
			/* same page as previous frag; drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(frag, curr_frag_len);
		skb->truesize += adapter->rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->dev = adapter->netdev;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
#ifdef RXHASH_defined
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;
#endif

	if (likely(!rxcp->vlanf))
		napi_gro_frags(napi);
	else
		vlan_gro_frags(napi, adapter->vlan_grp, rxcp->vlan_tag);
#endif
}

/* Decode a v1 (BE3-native) Rx completion descriptor into the
 * chip-independent rxcp representation.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	/* VLAN fields are only valid when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
				compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
						vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

/* Decode a v0-format (legacy/non-native mode) RX completion descriptor
 * into be_rx_compl_info. Same fields as the v1 parser, plus the ip_frag
 * indication which is only reported in the v0 layout here.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	/* vlan fields are only valid when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
				compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
						vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
						ip_frag, compl);
}

/* Fetch the next valid RX completion from rxo's CQ, or NULL if none is
 * pending. On success the raw entry is parsed (v0 or v1 layout depending
 * on be3_native), post-processed (vlan/pvid fixups), its valid bit is
 * cleared so HW can reuse the slot, and the CQ tail is advanced.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not computed over IP fragments; don't trust it */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* HW reports the tag byte-swapped on non-Lancer chips */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* PVID-tagged traffic is treated as untagged by the host */
		if (adapter->pvid  == (rxcp->vlan_tag & VLAN_VID_MASK))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

/* Allocate a page block big enough to hold @size bytes with flags @gfp. */
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);
	gfp_t flags = gfp;

	/* Multi-page allocations must be compound so that the per-fragment
	 * get_page()/put_page() refcounting on the head page covers the
	 * whole block.
	 */
	if (order)
		flags |= __GFP_COMP;

	return alloc_pages(flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
/* Allocate "big pages", carve them into rx_frag_size fragments and post up
 * to @frags_needed receive buffers to rxo's RXQ.
 *
 * Several fragments share one compound page: each extra fragment takes a
 * get_page() reference and the last user of a page is flagged with
 * last_page_user so the DMA unmap happens only once.
 *
 * Posting (the rxq notify) is deliberately the final step to avoid racing
 * with be_worker() refills from process context.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			/* Fix: a failed DMA mapping must never be handed to
			 * the HW; drop the page and retry on a later refill.
			 */
			if (unlikely(pci_dma_mapping_error(adapter->pdev,
							   page_dmaaddr))) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_info->page_offset = 0;
		} else {
			/* Reuse the current big page for another fragment */
			get_page(pagep);
			page_info->page_offset = page_offset +
						adapter->rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		pci_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + adapter->rx_frag_size +
			adapter->rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	/* Loop exited mid-page: the previous fragment is the page's last user */
	if (pagep)
		prev_page_info->last_page_user = true;

	/* Ensure that posting buffers is the last thing done by this
	 * routine to avoid racing between rx bottom-half and
	 * be_worker (process) contexts.
	 */
	if (posted) {
		atomic_add(posted, &rxq->used);
		/* HW doorbell takes at most 256 buffers per ring */
		do {
			notify = min(256u, posted);
			be_rxq_notify(adapter, rxq->id, notify);
		} while (posted -= notify, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}

	return;
}

/* Return the next valid TX completion from @tx_cq, or NULL if none.
 * Clears the valid bit for ring reuse and advances the CQ tail.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the entry body only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset valid so HW can reuse this CQ slot */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	/* ND() is a netmap debug macro; a no-op unless netmap debug is on */
	ND("Before returning tx_cq->tail = %d", tx_cq->tail);
	return txcp;
}

/* Walk txo's TXQ from its tail up to and including @last_index, unmapping
 * every wrb and freeing the skb(s) whose transmission completed.
 * Returns the number of wrbs consumed.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *skb = NULL;
	u16 frag_index, num_wrbs = 0;
	bool unmap_skb_hdr = false;
#ifdef DEV_NETMAP
	struct netmap_adapter *na = NA(adapter->netdev);

	/* In native netmap mode the buffers are owned by netmap; just walk
	 * the ring (hdr + data wrb per slot) without unmapping or freeing.
	 * Fix: dropped the unconditional printk() here - it fired once per
	 * completion and flooded the log on the hot path.
	 */
	if (na && (na->na_flags & NAF_NATIVE_ON)) {
		do {
			frag_index = txq->tail;
			queue_tail_inc(txq);  /* skip hdr wrb */
			queue_tail_inc(txq);  /* skip data wrb */
			num_wrbs += 2;
		} while (frag_index != last_index);
		return num_wrbs;
	}
#endif /* DEV_NETMAP */
	do {
		/* A non-NULL sent_skbs[] entry marks the hdr wrb of a request */
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				kfree_skb(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);  /* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* Unmap the header only once per skb (first data wrb) */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	kfree_skb(skb);

	return num_wrbs;
}

/* Drain and count all pending entries in the event queue.
 * Returns the number of events consumed (used to re-arm the EQ).
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do { 
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Read/modify the entry only after evt is seen non-zero */
		rmb();
		/* Clear the entry so HW can reuse this EQ slot */
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

/* Drain all pending events and leave the EQ in a disarmed state. */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int pending = events_get(eqo);

	be_eq_notify(adapter, eqo->q.id, false, true, pending);
}

/* Drain rxo's completion queue at teardown time and release all posted
 * but unused receive buffers. Must only be called after the RXQ has been
 * destroyed/quiesced so no new completions are generated.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~50ms or on a detected HW error */
			if (flush_wait++ > 50 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	for (; atomic_read(&rxq->used) > 0; ) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

/* Reap all outstanding TX completions at ifdown time and then forcibly
 * reclaim any posted wrbs for which completions will never arrive
 * (HW never saw them, or HW is in error).
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, reset_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	int i, pending_txqs;

	/* Wait for a max of ~2s (2000 x 1ms) for all tx-completions to
	 * arrive. (Fix: the old comment claimed 200ms; the code waits 2s.
	 * Also removed the per-iteration dev_info() that printed queue
	 * state every millisecond and flooded the log on every ifdown.)
	 */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			/* Quiesced when only never-notified wrbs remain */
			if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 2000 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	/* free posted tx for which compls will never arrive */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			reset_idx = end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw never saw these requests, reset indices */
			txq->head = txq->tail = reset_idx;
		}
	}
}

/* Tear down every event queue: drain it, destroy the HW object, drop the
 * NAPI context and free the ring memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (!eqo->q.created) {
			/* ring memory may exist even if HW create failed */
			be_queue_free(adapter, &eqo->q);
			continue;
		}
		be_eq_clean(eqo);
		be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
		netif_napi_del(&eqo->napi);
		be_queue_free(adapter, &eqo->q);
	}
}

/* Create one event queue per interrupt vector (capped by cfg_num_qs),
 * each with its own NAPI context and adaptive-interrupt (AIC) state.
 * Returns 0 on success or a negative errno.
 * NOTE(review): on mid-loop failure this returns without unwinding;
 * presumably the caller invokes be_evt_queues_destroy() - verify.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u32, num_irqs(adapter),
				    adapter->cfg_num_qs);
	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		if (!eqo->napi.dev)
			return -ENOMEM;

		aic = &adapter->aic_obj[i];

		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}

/* Destroy the MCC queue and its completion queue (HW objects first,
 * then the ring memory).
 */
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;

	if (mccq->created)
		be_cmd_q_destroy(adapter, mccq, QTYPE_MCCQ);
	be_queue_free(adapter, mccq);

	if (mcc_cq->created)
		be_cmd_q_destroy(adapter, mcc_cq, QTYPE_CQ);
	be_queue_free(adapter, mcc_cq);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;
	int status;

	cq = &adapter->mcc_obj.cq;
	status = be_queue_alloc(adapter, cq, MCC_CQ_LEN,
				sizeof(struct be_mcc_compl));
	if (status)
		return status;

	/* Use the default EQ for MCC completions */
	status = be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0);
	if (status)
		return status;

	q = &adapter->mcc_obj.q;
	status = be_queue_alloc(adapter, q, MCC_Q_LEN,
				sizeof(struct be_mcc_wrb));
	if (status)
		return status;

	/* Ask BE to create MCC queue */
	status = be_cmd_mccq_create(adapter, q, cq);
	if (status)
		return status;

	return 0;
}

/* Destroy all TX queues and their completion queues. */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		/* HW object first (if it exists), then ring memory */
		if (txo->q.created)
			be_cmd_q_destroy(adapter, &txo->q, QTYPE_TXQ);
		be_queue_free(adapter, &txo->q);

		if (txo->cq.created)
			be_cmd_q_destroy(adapter, &txo->cq, QTYPE_CQ);
		be_queue_free(adapter, &txo->cq);
	}
}

/* Create the TX queues and their CQs. With priority-group/PFC enabled,
 * one TXQ per priority is created and the traffic-class to TXQ mapping
 * recorded; otherwise one TXQ per event queue (capped by HW max).
 * Returns 0 on success or a negative errno (caller unwinds on failure).
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;
	u8 tc_id;

	if (adapter->flags & BE_FLAGS_PG_PFC)
		adapter->num_tx_qs = be_max_prio_txqs(adapter);
	else
		adapter->num_tx_qs = min(adapter->num_evt_qs,
					 be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;	

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 0);
		if (status)
			return status;

		spin_lock_init(&txo->tx_lock);
		txo->idx = i;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo, &tc_id);
		if (status)
			return status;

		/* Remember which TXQ serves this FW-assigned traffic class */
		if (adapter->flags & BE_FLAGS_PG_PFC)
			adapter->tc_txq_map[tc_id] = i;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}

/* Destroy every RX completion queue and free its ring memory. */
static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		struct be_queue_info *cq = &rxo->cq;

		if (cq->created)
			be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
		be_queue_free(adapter, cq);
	}
}

/* Create one RX completion queue per RX ring. One RSS ring is created
 * per EQ; when RSS is used an extra default RXQ handles non-IP traffic.
 * Returns 0 on success or a negative errno (caller unwinds on failure).
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(adapter->rx_frag_size)) *
						PAGE_SIZE;
	/* With GRO off, frames are never coalesced (1 frag at a time) */
	adapter->max_rx_coal = gro ? BE_INIT_FRAGS_PER_FRAME : 1;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;
	
		/* RX CQs may share EQs when there are more CQs than EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);

	return 0;
}

/* Legacy INTx interrupt handler; only EQ0 is serviced in this mode. */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

/* Per-EQ MSI-X interrupt handler: disarm the EQ (be_poll() re-arms it
 * when done) and hand the rest of the work to NAPI.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;

	be_eq_notify(adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);

	return IRQ_HANDLED;
}

/* Should this completion take the GRO/LRO path? Requires TCP, no error,
 * coalescing enabled (max_rx_coal > 1) and a good L4 checksum.
 */
static inline bool do_gro(struct be_adapter *adapter,
			struct be_rx_compl_info *rxcp)
{
	bool insert_tag = false;

#ifndef NULL_VLAN_GRP_SAFE
	/* NOTE(review): presumably on kernels where the vlan-gro path
	 * cannot handle a NULL vlan_grp, tagged pkts without a configured
	 * vlan group must bypass GRO - confirm against kernel version */
	insert_tag = rxcp->vlanf && !adapter->vlan_grp;
#endif
	return rxcp->tcpf && !rxcp->err && adapter->max_rx_coal > 1 &&
	       rxcp->l4_csum && !insert_tag;
}

/* NAPI RX processing for one RX ring: consume up to @budget completions,
 * dispatch each packet to the GRO/LRO or regular path, then re-arm the
 * CQ and replenish consumed receive buffers.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	bool flush_lro = false;
	u32 buf_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}
		/* Drop pkts that arrive due to imperfect filtering in
		* promiscuous mode on some skews
		*/
		if (!lancer_chip(adapter) &&
			unlikely(rxcp->port != adapter->port_num)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Coalescing path: GRO where the kernel supports it,
		 * otherwise the driver's LRO manager */
		if (do_gro(adapter, rxcp)) {
			if (adapter->gro_supported) {
				be_rx_compl_process_gro(rxo, napi, rxcp);
			} else {
				be_rx_compl_process_lro(rxo, rxcp);
				flush_lro = true;
			}
		} else {
			be_rx_compl_process(rxo, rxcp);
		}
loop_continue:
		buf_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

#ifndef GRO_defined
	if (flush_lro)
		lro_flush_all(&rxo->lro_mgr);
#endif /* GRO_defined */

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Refill the ring when it drops below the watermark */
		if (atomic_read(&rxo->q.used) <= RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST, buf_consumed));
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
	adapter->netdev->last_rx = jiffies;
#endif
	return work_done;
}

/* Bump the per-TXQ error counter matching a TX completion status. */
static inline void be_update_tx_stats(struct be_adapter *adapter,
				   struct be_tx_obj *txo, u8 status)
{
	/* Successful completions are not counted here */
	if (status == BE_TX_COMP_VALID)
		return;

	if (status == BE_TX_COMP_ACL_ERR)
		tx_stats(txo)->tx_compl_acl_err++;
	else if (status == BE_TX_COMP_INVALID)
		tx_stats(txo)->tx_compl_invalid++;
	else
		tx_stats(txo)->tx_compl_err++;
}

/* Reap up to @budget TX completions from txo's CQ, reclaim the wrbs and
 * wake the netdev subqueue if it was flow-controlled.
 * Returns true if budget was NOT exhausted (i.e. the queue is drained).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

#ifdef DEV_NETMAP
	/*
	 * In netmap mode, all the work is done in the context
	 * of the client thread. Interrupt handlers only wake up
	 * clients, which may be sleeping on individual rings
	 * or on a global resource for all rings.
	 */
	if (netmap_tx_irq(adapter->netdev, txo->idx))
		return 1; /* seems to be ignored */
#endif /* DEV_NETMAP */

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
		be_update_tx_stats(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					      status, txcp));
	}

	if (!work_done)
		goto done;

	be_cq_notify(adapter, txo->cq.id, true, work_done);
	tx_stats(txo)->tx_compl += work_done;

	/* tx_lock serializes against the xmit path touching txq->used
	 * and pend_wrb_cnt */
	spin_lock_bh(&txo->tx_lock);

	atomic_sub(num_wrbs, &txq->used);

	/* Flush any batched-but-unnotified wrbs once below the watermark */
	if (txo->pend_wrb_cnt && atomic_read(&txq->used) < STOP_BATCHING_WM)
		be_xmit_finish(adapter, txo);

	spin_unlock_bh(&txo->tx_lock);

	/* As a completion may free a lot of wrbs, re-enable the subqueue
	 * once at least half the ring is free */
	if (__netif_subqueue_stopped(adapter->netdev, txo->idx) &&
	    atomic_read(&txq->used) < txq->len / 2)
		netif_wake_subqueue(adapter->netdev, txo->idx);

done:
	return (work_done < budget); /* Done */
}

/* NAPI poll handler for one EQ: service all TX and RX queues bound to
 * this EQ (and MCC completions on the MCC EQ), then either complete NAPI
 * and re-arm the EQ, or stay in polling mode.
 * Returns the amount of budget consumed.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget);
		/* TX not drained: claim the full budget so NAPI re-polls */
		if (!tx_done)
			max_work = budget;
	}
	
	/* This loop will iterate twice for EQ0 in which 
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}

/* Release the MSI-X vectors (no-op if MSI-X was never enabled). */
static void be_msix_disable(struct be_adapter *adapter)
{
	if (!msix_enabled(adapter))
		return;

	pci_disable_msix(adapter->pdev);
	adapter->num_msix_vec = 0;
	adapter->num_msix_roce_vec = 0;
}

/* Enable MSI-X, splitting the vectors between NIC and RoCE when RoCE is
 * supported. Falls back to fewer vectors if the full request fails; a PF
 * that cannot get MSI-X at all returns 0 and later uses INTx.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, status, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= MIN_MSIX_VECTORS) {
		/* A positive status is the number of vectors actually
		 * available; retry with that smaller count */
		dev_info(dev, "Could not get %d MSI-x vector(s)\n", num_vec);
		dev_info(dev, "Using %d vector(s)\n", status);
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	/* NOTE(review): status may still be a positive vector count here;
	 * verify callers treat any non-zero value as failure */
	if (!be_physfn(adapter))
		return status;

	/* PF falls back to INTx */
	return 0;
done:
	/* Give half the vectors to RoCE when there are enough of them */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}

/* Request one IRQ per event queue. On failure, all IRQs registered so
 * far are released and MSI-X is disabled.
 * Returns 0 on success or the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* Fix: bounded print - sprintf() could overrun the
		 * fixed-size desc[] buffer for long interface names.
		 */
		snprintf(eqo->desc, sizeof(eqo->desc), "%s-q%d",
			 netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind the IRQs registered before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

/* Register interrupt handlers: MSI-X when available, otherwise legacy
 * INTx on EQ0 (a PF may fall back to INTx, a VF may not).
 * Returns 0 on success or a request error.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (!status) {
			adapter->isr_registered = true;
			return 0;
		}
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}

	adapter->isr_registered = true;
	return 0;
}

/* Release whichever interrupt handlers be_irq_register() installed. */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	if (msix_enabled(adapter)) {
		/* MSI-X: one IRQ per event queue */
		for_all_evt_queues(adapter, eqo, i)
			free_irq(be_msix_vec_get(adapter, eqo), eqo);
	} else {
		/* INTx mode registered only EQ0 */
		free_irq(netdev->irq, &adapter->eq_obj[0]);
	}

	adapter->isr_registered = false;
}

/* ndo_select_queue: pick the TXQ for @skb. With priority-group/PFC the
 * vlan priority selects the traffic-class TXQ; otherwise the kernel's
 * standard tx hash is used.
 */
static u16 be_select_queue(struct net_device *netdev,
			struct sk_buff *skb)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u8 prio;

	if (adapter->num_tx_qs == 1)
		return 0;

	if (adapter->flags & BE_FLAGS_PG_PFC) {
		/* NOTE(review): vlan_tx_tag_get() is called without a
		 * vlan_tx_tag_present() check; presumably untagged skbs
		 * yield prio 0 here - confirm */
		prio = (vlan_tx_tag_get(skb) & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
		return adapter->tc_txq_map[adapter->prio_tc_map[prio]];
	}
	return skb_tx_hash(netdev, skb);
}

/* Destroy every RX ring: tear down the HW queue, drain its CQ and
 * reclaim posted buffers, then free the ring memory and page table.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		struct be_queue_info *rxq = &rxo->q;

		if (rxq->created) {
			be_cmd_rxq_destroy(adapter, rxq);
			/* CQ must be drained after the RXQ is destroyed */
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, rxq);
		kfree(rxo->page_info_tbl);
	}
}

/* ndo_stop: quiesce the interface - disable NAPI and async MCC, drain TX,
 * destroy RX rings, drop extra unicast MACs, clean the EQs and release
 * the IRQs. (Fix: removed leftover debug instrumentation - a dev_info()
 * and an unconditional dump_stack() fired on every interface-down.)
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i)
			napi_disable(&eqo->napi);
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	be_link_status_update(adapter, LINK_DOWN);
	adapter->flags &= ~BE_FLAGS_LINK_STATUS_INITED;

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Delete the extra (non-primary) unicast MACs; entry 0 is kept */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	for_all_evt_queues(adapter, eqo, i) {
		/* Make sure no handler is still running before cleaning */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);
#ifdef DEV_NETMAP
	 netmap_disable_all_rings(netdev);
#endif

	return 0;
}

/* Create all RX rings (default RXQ first, then the RSS rings), program
 * the RSS indirection table and flags, and post the initial buffers.
 * Returns 0 on success or a negative errno (caller unwinds on failure).
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	/* Fix: zero-initialize so that stack garbage is never handed to FW
	 * via be_cmd_rss_config() when RSS is disabled (single-RXQ case,
	 * where the table is not filled in below).
	 */
	u8 rsstable[128] = {0};

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;

		rxo->page_info_tbl = kzalloc(sizeof(struct be_rx_page_info) *
					     RX_Q_LEN, GFP_KERNEL);
		if (!rxo->page_info_tbl)
			return -ENOMEM;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
			       adapter->rx_frag_size, adapter->if_handle,
			       false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       adapter->rx_frag_size,
				       adapter->if_handle, true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the 128-entry indirection table round-robin over
		 * the RSS rings */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;

	} else {
		/* Disable RSS, if only default RX Q is created */
		adapter->rss_flags = RSS_ENABLE_NONE;
	}

	rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags, 128);
	if (rc) {
		adapter->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
#ifdef DEV_NETMAP
		/* netmap supplies its own buffers once it takes over */
		if (be2net_netmap_configure_rx_ring(adapter, i))
			return 0;
#endif /* DEV_NETMAP */
	}

	return 0;
}

/* ndo_open: create the RX rings, register IRQs, arm all CQs/EQs, enable
 * NAPI and async MCC, query link state and start the TX queues.
 * On any failure the partially-opened state is unwound via be_close()
 * and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm all completion queues so they start raising events */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Link query failure is non-fatal; status update happens async */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status) {
		adapter->flags |= BE_FLAGS_LINK_STATUS_INITED;
		be_link_status_update(adapter, link_status);
	}

	netif_tx_start_all_queues(netdev);

#ifdef DEV_NETMAP
	netmap_enable_all_rings(adapter->netdev);
#endif
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

/* Enable or disable magic-packet Wake-on-LAN in the FW and PCI core.
 * Returns 0 on success or a negative errno.
 * (Fix: alloc failure returned -1, i.e. -EPERM, instead of -ENOMEM; the
 * duplicated pci_free_consistent() calls are folded into one exit path.)
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	/* An all-zero MAC tells FW to disable magic-packet WoL */
	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (cmd.va == NULL)
		return -ENOMEM;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			goto free_cmd;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

free_cmd:
	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs the MAC via a pmac add; newer chips set
		 * the primary MAC directly */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* NOTE(review): only the last octet is incremented; wraps
		 * past 0xff for >256 VFs - confirm num_vfs bound */
		mac[5] += 1;
	}
	return status;
}

/* Read back each VF's MAC from the FW MAC list and cache it in vf_cfg.
 * Returns 0 on success or the first query error.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;
	bool active;

	for_all_vfs(adapter, vf_cfg, vf) {
		active = false;
		status = be_cmd_get_mac_from_list(adapter, mac, &active,
						  &vf_cfg->pmac_id,
						  vf_cfg->if_handle, vf+1);

		/* NOTE(review): queried a second time when the first entry
		 * is active - presumably to fetch the currently-active MAC
		 * rather than the factory one; confirm against the FW API */
		if (!status && active)
			status = be_cmd_get_mac_from_list(adapter, mac, &active,
							  &vf_cfg->pmac_id,
							  vf_cfg->if_handle,
							  vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}

/* Undo be_vf_setup(): disable SR-IOV (when safe) and release the
 * per-VF FW objects and the vf_cfg array.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	/* Cannot tear down VFs while guests still own them */
	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	/* do not call pci_disable_sriov if error recovery is in progress */
	if (be_error_recovering(adapter))
		goto done;

	pci_disable_sriov(adapter->pdev);

	/* Per-VF teardown: remove the MAC filter first, then destroy
	 * the interface object it was attached to.
	 */
	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, 0);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}

/* Tear down the MAC-learning machinery: drain pending learn/unlearn
 * work, delete all learnt MAC filters from HW, free the hash table and
 * destroy the workqueue.
 */
static void be_clean_mac_learning(struct be_adapter *adapter)
{
	int j;
	struct hlist_node *pos, *n;
	struct hlist_head *head;
	struct mac_node *node = NULL;

	/* Let queued learn/unlearn work finish before freeing its data */
	if (adapter->workq)
		flush_workqueue(adapter->workq);

	if (adapter->flags & BE_FLAGS_MAC_LEARNING_INITIALIZED) {
		for (j = 0; j < BE_MAX_MAC; j++) {
			head = &(adapter->machash.head[j]);
			hlist_for_each_entry_safe(node, pos, n, head, list) {
				be_cmd_pmac_del(adapter,
						adapter->if_handle,
						node->pmac_id, 0);
				kfree(node);
			}
		}
		kfree(adapter->machash.head);
		/* Clear the pointer so stale references are caught */
		adapter->machash.head = NULL;
		adapter->flags &= ~BE_FLAGS_MAC_LEARNING_INITIALIZED;
	}
	if (adapter->workq) {
		destroy_workqueue(adapter->workq);
		/* NULL the pointer: a subsequent call to this function
		 * must not flush/destroy an already-freed workqueue.
		 */
		adapter->workq = NULL;
	}
}

/* Destroy all HW queues in the reverse of their creation order in
 * be_setup_queues() (evt -> tx -> rx -> mcc on create).
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}

/* Stop the periodic worker task, if it was ever armed */
static void be_cancel_worker(struct be_adapter *adapter)
{
	if (!(adapter->flags & BE_FLAGS_WORKER_SCHEDULED))
		return;

	cancel_delayed_work_sync(&adapter->work);
	adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
}

/* Delete the primary MAC plus all additional uc-mac filters from HW
 * and release the pmac-id table.
 */
static void be_mac_clear(struct be_adapter *adapter)
{
	int idx;

	if (!adapter->pmac_id)
		return;

	/* Entry 0 holds the primary MAC; entries 1..uc_macs are uc-macs */
	for (idx = 0; idx <= adapter->uc_macs; idx++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[idx], 0);
	adapter->uc_macs = 0;

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;
}

/* Tear down everything be_setup() created, roughly in reverse order */
static int be_clear(struct be_adapter *adapter)
{
	/* MAC learning is only active in UMC/nPAR multi-channel modes */
	if (adapter->mc_type == UMC || adapter->mc_type == nPAR)
		be_clean_mac_learning(adapter);

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle,  0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	return 0;
}

/* Create a NIC interface object in FW for every VF */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	u32 cap_flags, en_flags, vf;
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		/* On post-BE3 chips a FW profile, when present, dictates
		 * the interface capabilities granted to the VF.
		 */
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* Enable only the basic RX filtering bits out of whatever
		 * capabilities ended up available.
		 */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
			   BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			break;
	}

	return status;
}

/* Allocate the per-VF config array and mark every handle invalid so
 * cleanup paths can tell what was never provisioned.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, cfg, vf) {
		cfg->if_handle = -1;
		cfg->pmac_id = -1;
	}

	return 0;
}

/* Recover the if-handles and MACs FW had already provisioned to the
 * VFs (VFs were left enabled by a previous driver instance).
 */
static int be_vfs_regain_handles(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int status, vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_if_id(adapter, vf_cfg, vf);
		if (status)
			return status;
	}

	return be_vfs_mac_query(adapter);
}

/* Fresh provisioning for VFs: create the i/f objects, then program
 * each VF's initial MAC address.
 */
static int be_vfs_provision_resources(struct be_adapter *adapter)
{
	int status;

	status = be_vfs_if_create(adapter);
	if (status)
		return status;

	return be_vf_eth_addr_config(adapter);
}

/* Enable and provision SR-IOV VFs: decide the VF count, create or
 * recover per-VF FW resources, grant privileges, and finally enable
 * SR-IOV at the PCI level (first-time setup only).
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;
	u16 lnk_speed;

	/* If VFs were left enabled (e.g. by a previous driver load or
	 * across an error recovery), adopt that count and ignore the
	 * num_vfs module parameter.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		/* Clamp the requested VF count to what HW/FW can support */
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev,
				 "Resources unavailable to initialize %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/*
	 * VFs are already enabled.
	 * If its not a FW error recovery, regain old handles provisioned to VF
	 * Else, re-provision resources to VFs as the FW undergone a reset
	 */
	if (old_vfs) {
		if (be_error_recovering(adapter)) {
			status = be_vfs_provision_resources(adapter);
			if (status)
				goto err;
		} else {
			status = be_vfs_regain_handles(adapter);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_provision_resources(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* QoS setting on first-time BE3 VF setup only; presumably
		 * the 1000 is a rate in Mbps -- TODO confirm against
		 * be_cmd_set_qos().
		 */
		if (!old_vfs && BE3_chip(adapter))
			be_cmd_set_qos(adapter, 1000, vf+1);

		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		if (!old_vfs || be_error_recovering(adapter)) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter, 1, 1, vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			/* Non-fatal: continue running as a PF with no VFs */
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			return 0;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}

/* Converting function_mode bits on BE3 to SH mc_type enums */
/* Converting function_mode bits on BE3 to SH mc_type enums */
static u8 be_convert_mc_type(u32 function_mode)
{
	bool flex10 = function_mode & FLEX10_MODE;
	bool vnic = function_mode & VNIC_MODE;

	if (flex10)
		return vnic ? vNIC1 : FLEX10;
	if (vnic)
		return vNIC2;
	if (function_mode & UMC_ENABLED)
		return UMC;
	return MC_NONE;
}

/* On BE2/BE3 FW does not suggest the supported limits */
/* Fill in *res with the queue/filter/VLAN limits for BE2/BE3, where FW
 * does not report them directly and they must be derived from chip
 * type, SR-IOV intent and multi-channel mode.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;
	int max_vfs;

	if (be_physfn(adapter) && BE3_chip(adapter))
		be_cmd_get_profile_config(adapter, res, 0);

	/* Prefer the FW-profile VF limit; fall back to the PCI cap */
	max_vfs = res->max_vfs ? : pci_sriov_get_totalvfs(pdev);

	if (BE3_chip(adapter) && sriov_want(adapter)) {
		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		use_sriov = res->max_vfs;
	}

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}
	res->max_mcast_mac = BE_MAX_MC;

	/* For BE3 1Gb ports, F/W does not properly support multiple TXQs.
	 * Create multiple TX rings equal to Rx rings for RSS capable functions
	 * in multichannel configuration.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	/* RSS queues only for RSS-capable, non-SRIOV physical functions */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 for the default (non-RSS) RX queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (max_vfs > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}

/* Reset adapter soft state to pre-setup defaults */
static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	/* PF starts with full command privileges, VF with the minimum */
	adapter->cmd_privileges = be_physfn(adapter) ? MAX_PRIVILEGES :
						       MIN_PRIVILEGES;
}

/* Populate adapter->res with this function's resource limits: derived
 * locally on BE2/BE3, queried from FW on Lancer/Skyhawk.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		/* The PF's VF limit comes from the profile, not the
		 * per-function config.
		 */
		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}
	}

	if (adapter->flags & BE_FLAGS_PG_PFC)
		be_cmd_get_max_prio_txqs(adapter, &adapter->res.max_prio_tx_qs);

	/*Handle kernel imposed limits if any */
	if (!tx_mq_kernel) {
		adapter->res.max_tx_qs = 1;
		dev_info(dev, "Kernel doesn't support multiple TXQs\n");
	}

	if (!BEx_chip(adapter)) {
		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}

/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	/* Basic function attributes: port, mode, caps, asic revision */
	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	/* Active profile is informational only; failure is ignored */
	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	/* Attempt to enable NIC PFC/ETS feature only if
	 * multi TXQ support exists in kernel
	 */
	if (tx_prio && tx_mq_kernel)
		be_cmd_enable_pg_pfc(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}

static void be_init_mac_learning(struct be_adapter *adapter)
{
	int i;

	adapter->workq = create_workqueue("mac_lrn_unlrn_queue");
	if (!adapter->workq)
		return;

	adapter->machash.head = kcalloc(BE_MAX_MAC,
					sizeof(struct hlist_head),
					GFP_ATOMIC);
	if (!adapter->machash.head) {
		dev_err(&adapter->pdev->dev,
			"Mac hash table alloc failed\n");
		goto err;
	}
	spin_lock_init(&adapter->mac_hash_lock);

	for (i = 0; i < BE_MAX_MAC; i++)
		INIT_HLIST_HEAD(&adapter->machash.head[i]);

	adapter->flags |= BE_FLAGS_MAC_LEARNING_ENABLED;
	adapter->flags |= BE_FLAGS_MAC_LEARNING_INITIALIZED;

	return;
err:
	be_clean_mac_learning(adapter);
	return;
}

/* Establish the interface MAC: use the factory-programmed address on
 * first-time setup, or re-program the previously configured one after
 * a HW reset.
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		/* First-time setup: query the permanent MAC from FW */
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3 VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}

/* Arm the periodic (1 sec) worker and record that it is scheduled so
 * be_cancel_worker() knows there is something to cancel.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}

/* Create all HW queues (event, TX, RX-completion, MCC) and publish the
 * actual ring counts to the network stack. On any failure the partially
 * created queues are the caller's responsibility (via be_clear).
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	/* EQs first: the other queues' completions funnel into them */
	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}

/* Destroy and re-create all queues (e.g. after a ring-count change),
 * quiescing the interface and worker around the rebuild.
 * NOTE(review): be_setup_queues() requires rtnl_lock (see be_setup);
 * presumably every caller of this function holds it -- confirm.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}

/* Main adapter bring-up: query config/resources, create the interface
 * and queues, program MACs/VLANs/flow-control, and set up SR-IOV.
 * Any failure unwinds everything via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status, level;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	/* pmac_id table sized for the primary MAC plus all uc-macs */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id) {
		status = -ENOMEM;
		goto err;
	}

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Request the wanted RX-filtering flags, trimmed to what this
	 * function's interface capabilities actually allow.
	 */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter) && be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	/* MAC learning applies only to UMC/nPAR multi-channel modes */
	if (adapter->mc_type == UMC || adapter->mc_type == nPAR)
		be_init_mac_learning(adapter);

	be_check_sfps(adapter);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work\n",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	/* Re-program any VLANs configured before a reset */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	/* Sync HW flow-control settings with the driver's desired ones */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter, 1, 1, 0);

	/* SR-IOV failures are logged but never fail adapter bring-up */
	if (sriov_want(adapter)) {
		if (!sriov_kernel)
			dev_warn(dev, "kernel doesn't fully support SRIOV\n");
		else if (!be_max_vfs(adapter))
			dev_warn(dev, "device doesn't support SRIOV\n");
		else
			be_vf_setup(adapter);
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_autoneg_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	/* EMI canceller applies only to the 10GBase-T (TN_8022) PHY */
	if (be_physfn(adapter) && !lancer_chip(adapter)) {
		if ((adapter->phy.phy_type == TN_8022) &&
		    (adapter->phy.interface_type == PHY_TYPE_BASET_10GB))
			be_cmd_set_emi_canceller(adapter, emi_canceller);
	}

	be_schedule_worker(adapter);
	return 0;
err:
	be_clear(adapter);
	return status;
}

#define IOCTL_COOKIE "SERVERENGINES CORP"
static int be_do_ioctl(struct net_device *netdev,
			struct ifreq *ifr, int cmd)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_cmd_req_hdr req;
	void *data = ifr->ifr_data;
	void *ioctl_ptr;
	void *va;
	dma_addr_t dma;
	u32 req_size;
	int status, ret = 0;
	u8 cookie[32];

	switch (cmd) {
	case SIOCDEVPRIVATE:
		if (copy_from_user(cookie, data, strlen(IOCTL_COOKIE)))
			return -EFAULT;

		if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
			return -EINVAL;

		ioctl_ptr = (u8 *)data + strlen(IOCTL_COOKIE);
		if (copy_from_user(&req, ioctl_ptr,
				sizeof(struct be_cmd_req_hdr)))
			return -EFAULT;

		req_size = le32_to_cpu(req.request_length);
		if (req_size > 65536)
			return -EINVAL;

		req_size += sizeof(struct be_cmd_req_hdr);
		va = pci_alloc_consistent(adapter->pdev, req_size, &dma);
		if (!va)
			return -ENOMEM;
		if (copy_from_user(va, ioctl_ptr, req_size)) {
			ret = -EFAULT;
			break;
		}

		status = be_cmd_pass_ext_ioctl(adapter, dma, req_size, va);
		if (status == -EIO)
			break;

		if (copy_to_user(ioctl_ptr, va, req_size)) {
			ret = -EFAULT;
			break;
		}
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (va)
		pci_free_consistent(adapter->pdev, req_size, va, dma);

	return ret;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* Kick NAPI on every event queue so pending completions get
	 * processed even while interrupts are unavailable.
	 */
	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static char flash_cookie[2][16] =	{"*** SE FLAS", "H DIRECTORY *** "};

/* Decide whether the boot-code (redboot) section needs reflashing by
 * comparing the CRC currently in flash against the CRC embedded in the
 * last 4 bytes of the new image. Returns true when flashing is needed.
 */
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	/* The CRC occupies the final 4 bytes of the image in the UFI file */
	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

/* Check whether a component of the given image type is present in the
 * UFI's flash section table (gen2 layout on BE2, gen3 otherwise).
 */
static bool is_comp_in_ufi(struct be_adapter *adapter,
			 struct flash_section_info *fsec, int type)
{
	struct flash_section_info_g2 *fsec_g2 = NULL;
	int img_type, i;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		img_type = fsec_g2 ?
			   le32_to_cpu(fsec_g2->fsec_entry[i].type) :
			   le32_to_cpu(fsec->fsec_entry[i].type);
		if (img_type == type)
			return true;
	}

	return false;
}

/* Scan past the file header, 32 bytes at a time, for the flash-section
 * cookie; return the section info or NULL if it is not found.
 */
static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec;
	const u8 *p;

	for (p = fw->data + header_size; p < fw->data + fw->size; p += 32) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
	}

	return NULL;
}

/* Write one image to flash in 32KB chunks. Intermediate chunks use a
 * SAVE op (staged on the adapter); the final chunk uses a FLASH op,
 * which commits the staged data to the flash part.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op = 0, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks stage (SAVE) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
						flash_op, num_bytes);
		if (status) {
			/* FW that lacks PHY-flash support rejects the op;
			 * treat that as a benign skip rather than an error.
			 */
			if (compl_status(status) == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed\n");
			return status;
		}
	}
	return 0;
}

/* For BE2, BE3 and BE3-R */
/* Flash a BE2/BE3/BE3-R UFI: walk the fixed per-generation component
 * tables and flash each component that is present in the UFI's flash
 * section and applicable to this adapter.
 */
static int be_flash_BEx(struct be_adapter *adapter,
		const struct firmware *fw,
		struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;
	/* Per-component flash layout for gen3 (BE3) parts */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};
	/* Per-component flash layout for gen2 (BE2) parts */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {

		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW is flashed only when the base FW on flash is
		 * at least 3.102.148.0
		 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
				continue;

		/* Boot code is reflashed only if its CRC changed */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Bounds-check the component against the UFI file size */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		status = be_flash(adapter, p, flash_cmd,
				  pflashcomp[i].optype, pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}

/* Flash a Skyhawk UFI: iterate the flash-section entries embedded in
 * the image, map each image type to a flash op-type, and flash it.
 * Unknown image types are skipped.
 */
static int be_flash_skyhawk(struct be_adapter *adapter,
		const struct firmware *fw,
		struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	int img_offset, img_size, img_optype, redboot;
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status = 0, i, filehdr_size = 0;
	const u8 *p = fw->data;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);

		switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
		case IMAGE_FIRMWARE_iSCSI:
			img_optype = OPTYPE_ISCSI_ACTIVE;
			break;
		case IMAGE_BOOT_CODE:
			img_optype = OPTYPE_REDBOOT;
			break;
		case IMAGE_OPTION_ROM_ISCSI:
			img_optype = OPTYPE_BIOS;
			break;
		case IMAGE_OPTION_ROM_PXE:
			img_optype = OPTYPE_PXE_BIOS;
			break;
		case IMAGE_OPTION_ROM_FCoE:
			img_optype = OPTYPE_FCOE_BIOS;
			break;
		case IMAGE_FIRMWARE_BACKUP_iSCSI:
			img_optype = OPTYPE_ISCSI_BACKUP;
			break;
		case IMAGE_NCSI:
			img_optype = OPTYPE_NCSI_FW;
			break;
		default:
			continue;
		}

		/* Boot code is reflashed only if its CRC changed */
		if (img_optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
					img_offset, img_size,
					filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + img_offset + img_hdrs_size;
		/* Bounds-check the image against the UFI file size */
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		if (status) {
			/* Convert from on-disk little-endian before logging */
			dev_err(dev, "Flashing section type %d failed\n",
				le32_to_cpu(fsec->fsec_entry[i].type));
			return status;
		}
	}
	return 0;
}

/* Download FW to a Lancer adapter via the write-object interface:
 * stream the image in 32KB chunks to the "/prg" object, commit with a
 * zero-length write, then reset the adapter if FW asks for it.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* DMA buffer: command header followed by one chunk of image data */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
						&flash_cmd.dma);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(dev, "Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);

		if (status)
			break;

		/* Advance by what FW actually accepted, which may be
		 * less than the chunk size requested.
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
		yield();
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status)
		goto lancer_fw_exit;

	/* With VFs active a FW reset would disrupt them; defer to reboot */
	if (pci_num_vf(adapter->pdev)) {
		dev_info(dev, "Reboot server to activate new FW\n");
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}
	dev_info(dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

#define UFI_TYPE2		2
#define UFI_TYPE3		3
#define UFI_TYPE3R		10
#define UFI_TYPE4		4
#define UFI_TYPE4R		11
/* Map the UFI header's build generation and asic revision to one of the
 * UFI_TYPE* ids, rejecting images that do not match this chip.
 */
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (!fhdr)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return fhdr->asic_type_rev >= 0x10 ? UFI_TYPE4R : UFI_TYPE4;

	if (BE3_chip(adapter) && fhdr->build[0] == '3')
		return fhdr->asic_type_rev == 0x10 ? UFI_TYPE3R : UFI_TYPE3;

	if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}

/* Download FW to a BE2/BE3/Skyhawk adapter: validate the UFI against
 * the chip, then dispatch to the generation-specific flashing routine
 * for each image whose id marks it as flashable.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
	const u8 *p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
					&flash_cmd.dma);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* imageid == 1 marks the flashable image set */
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4R:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE4:
				/* Do not flash this on SH B0 cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				else {
					status = -1;
					dev_err(dev,
						"Can't load SH A0 UFI on B0\n");
				}
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw,
						      &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							&flash_cmd, num_imgs);
				else {
					status = -1;
					dev_err(dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* BE2 UFIs carry no flashable image headers; flash directly */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

/* Flash a new firmware image (UFI) file onto the adapter.
 * The interface must be up, since flashing posts commands on the MCC queues
 * which exist only while the interface is open.
 * Returns 0 on success or a negative errno on failure.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status = 0;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		/* was a bare -1; return a proper errno for this condition */
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	/* Lancer uses a sectioned flash layout different from BE2/BE3/SH */
	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	/* refresh the cached FW version strings after a successful flash */
	if (!status)
		be_cmd_get_fw_ver(adapter, adapter->fw_ver,
				  adapter->fw_on_flash);
fw_exit:
	/* NULL-safe: request_firmware() NULLs fw on failure */
	release_firmware(fw);
	return status;
}

#ifdef NDO_SET_FEATURES_defined
/* ndo_set_features handler: record the requested RX checksum offload state */
static int be_set_features(struct net_device *netdev,
			   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->rx_csum = features & NETIF_F_RXCSUM;
	return 0;
}
#endif

#ifdef NDO_BRIDGE_SETLINK_defined
/* ndo_bridge_setlink handler: program the HW e-switch port forwarding type
 * (VEB or VEPA) from the IFLA_BRIDGE_MODE attribute of the netlink request.
 * Returns 0 on success or a negative errno.
 */
static int be_ndo_bridge_setlink(struct net_device *dev,
				    struct nlmsghdr *nlh)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	/* e-switch mode is configurable only when SR-IOV is enabled */
	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	/* Fix: the IFLA_AF_SPEC attribute may be absent; iterating a NULL
	 * br_spec in nla_for_each_nested() dereferences a NULL pointer.
	 */
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

/* ndo_bridge_getlink handler: report the current e-switch mode (VEB/VEPA).
 * Returns 0 (nothing reported) when SR-IOV is off or the FW query fails.
 */
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				    struct net_device *dev,
				    u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 hsw_mode = PORT_FWD_TYPE_VEB;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only; on other chips (SH)
	 * query the FW for the configured forwarding type.
	 */
	if (!BEx_chip(adapter) && !lancer_chip(adapter)) {
		if (be_cmd_get_hsw_config(adapter, NULL, 0,
					  adapter->if_handle, &hsw_mode))
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
}
#endif

/* Net-device callback table. Not const: be_netdev_init() patches in
 * .ndo_select_queue at probe time when the tx_prio module param is set.
 */
static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
#ifndef NDO_GET_STATS64_defined
	.ndo_get_stats		= be_get_stats,
#else
	.ndo_get_stats64	= be_get_stats64,
#endif
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
#ifndef USE_NEW_VLAN_MODEL
	.ndo_vlan_rx_register	= be_vlan_register,
#endif
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
#ifdef NDO_VF_CFG_defined
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#endif /* NDO_VF_CFG_defined */
#ifdef NDO_VF_LINK_STATE_defined
	.ndo_set_vf_link_state  = be_set_vf_link_state,
#endif /*NDO_VF_LINK_STATE_defined*/
	.ndo_do_ioctl		= be_do_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
#ifdef NDO_SET_FEATURES_defined
	.ndo_set_features       = be_set_features,
#endif
#ifdef NDO_BRIDGE_SETLINK_defined
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#endif
};

/* Initialize the legacy inet_lro manager on every RX queue.
 * No-op on kernels with GRO (GRO_defined), where GRO replaces LRO.
 */
static void be_lro_init(struct be_adapter *adapter, struct net_device *netdev)
{
#ifndef GRO_defined
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		struct net_lro_mgr *mgr = &rxo->lro_mgr;

		mgr->dev = netdev;
		mgr->features = LRO_F_NAPI;
		mgr->ip_summed = CHECKSUM_UNNECESSARY;
		mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
		mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
		mgr->lro_arr = rxo->lro_desc;
		mgr->get_frag_header = be_get_frag_header;
		mgr->max_aggr = BE_MAX_FRAGS_PER_FRAME;
	}
#endif
}

/* Configure netdev feature flags, defaults and callback tables for a newly
 * allocated net_device before register_netdev(). Always returns 0.
 */
static int be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* user-togglable offloads */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RX hashing is only meaningful with multiple RX queues (RSS) */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->features |= NETIF_F_VLAN_SG | NETIF_F_VLAN_TSO |
			    NETIF_F_VLAN_CSUM;

	/* For lancer, Even if privileges are not set,
	* PF could have set allowed vlan list, in which case,
	* we need to specify this capability for VF also.*/
	if ((adapter->cmd_privileges & (BE_PRIV_FILTMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG)) || be_physfn(adapter) || lancer_chip(adapter))
		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	else
		netdev->features |= NETIF_F_VLAN_CHALLENGED;

#ifdef GRO_defined
	netdev->features |= NETIF_F_GRO;
	adapter->gro_supported = true;
#endif

	netdev->flags |= IFF_MULTICAST;

	/* start with carrier off until FW reports link state */
	adapter->flags &= ~BE_FLAGS_LINK_STATUS_INITED;
	netif_carrier_off(netdev);

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	/* patch in priority-based queue selection when tx_prio param is set */
	if (tx_prio)
                be_netdev_ops.ndo_select_queue = be_select_queue;
	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
#ifdef ETHTOOL_OPS_EXT_defined
	set_ethtool_ops_ext(netdev, &be_ethtool_ops_ext);
#endif

	be_lro_init(adapter, netdev);

	return 0;
}

/* Unmap whichever BARs be_map_pci_bars() mapped (CSR and/or doorbell) */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

/* PCI BAR number holding the doorbell registers: BAR 0 on Lancer and on
 * VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}

/* Record the RoCE doorbell BAR geometry (Skyhawk only). Always returns 0. */
static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int bar;

	if (!skyhawk_chip(adapter))
		return 0;

	bar = db_bar(adapter);
	adapter->roce_db.size = 4096;
	adapter->roce_db.io_addr = pci_resource_start(pdev, bar);
	adapter->roce_db.total_size = pci_resource_len(pdev, bar);
	return 0;
}

/* Map the PCI BARs used by the driver: the CSR BAR (BE2/BE3 PFs only) and
 * the doorbell BAR. Also records the RoCE doorbell BAR on Skyhawk.
 * Returns 0 on success or -ENOMEM if a mapping fails.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;

	/* expose the memory window via netdev (PF, non-Lancer) */
	if (be_physfn(adapter) && !lancer_chip(adapter)) {
		adapter->netdev->mem_start = pci_resource_start(pdev, 2);
		adapter->netdev->mem_end = pci_resource_start(pdev, 2) +
					pci_resource_len(pdev, 2);
	}

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		/* consistency: use the local pdev (was adapter->pdev) */
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;
pci_map_err:
	/* undo the CSR mapping (if any) before failing */
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

/* Undo be_ctrl_init(): unmap BARs and free the mailbox and rx-filter
 * DMA buffers if they were allocated.
 */
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem;

	be_unmap_pci_bars(adapter);

	mem = &adapter->mbox_mem_alloced;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size, mem->va,
				    mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size, mem->va,
				    mem->dma);
}

/* One-time control-path init: read SLI identity from PCI config space,
 * map BARs, allocate the (16-byte aligned) mailbox and the rx-filter DMA
 * buffers, and initialize the mbox/MCC locks.
 * Returns 0 on success or a negative errno; unwinds via the goto ladder.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* identify SLI family and PF/VF role from the SLI_INTF register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
				mbox_mem_alloc->size, &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem_align aliases into the over-allocated buffer; only
	 * mbox_mem_alloc is ever freed
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
		mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

/* Free the stats-command DMA buffer allocated by be_stats_init(), if any */
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		pci_free_consistent(adapter->pdev, cmd->size, cmd->va,
				    cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		/* BE3 */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		/* Skyhawk */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v2);

	cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

/* PCI remove: tear down a function in reverse order of be_probe().
 * The adapter NULL check covers the case where probe failed before
 * drvdata was set.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);
//	be_sysfs_remove_group(adapter);

	/* stop error recovery before unregistering the netdev */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	/* last: the adapter struct lives inside the netdev allocation */
	free_netdev(adapter->netdev);
}

/* Fetch one-time controller attributes from FW and seed adapter defaults.
 * Returns 0 on success or the FW command's error status.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status = be_cmd_get_cntl_attributes(adapter);

	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	adapter->rx_frag_size = rx_frag_size;

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}

/* Attempt to recover a Lancer function after a HW/FW error: wait for FW
 * readiness, then tear down and re-create all resources, reopening the
 * interface if it was running. Returns 0 on success or a negative errno
 * (-EAGAIN means resources are still being provisioned; caller retries).
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	adapter->flags |= BE_FLAGS_TRY_RECOVERY;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;

	/* fix: success was logged at error level (dev_err) */
	dev_info(dev, "Adapter recovery succeeded\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}

/* Poll the adapter for fatal errors. On Lancer, the SLIPORT status
 * registers are checked; on BE/SH, the PCI-config UE (unrecoverable error)
 * status registers are checked against their mask registers. On any real
 * error the carrier is turned off; hw_error is latched where applicable.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0, i;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->pdev->dev;
	bool error_detected = false;

	/* an already-latched error has been handled; nothing to do */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* only unmasked bits indicate real errors */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */
		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* log the name of every set UE bit */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE LOW: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE HIGH: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
			netif_carrier_off(netdev);
}

/* Periodic (1s) error-detection/recovery worker. On Lancer, a latched
 * hw_error triggers a full function recovery; the work rearms itself
 * unless recovery failed with a non-retryable error.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter,  func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* detach under rtnl to quiesce the stack during recovery */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}
	/* Temporary workaround. Always do recovery on VFs
	 * will remove this once BZ 154818 is addressed in Lancer FW
	 */
	if (status && be_virtfn(adapter))
		status = -EAGAIN;

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}

/* Periodic (1s) housekeeping worker: reaps MCC completions, refreshes
 * stats, reads die temperature, replenishes starved RX rings, updates
 * EQ delays and evicts learned MACs. Always rearms itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* If interrupts are not enabled, just reap pending mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	/* don't issue a new stats query while one is still in flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
					&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* temperature query is rate-limited to every be_get_temp_freq ticks */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* repost buffers on RX rings that ran dry in the fast path */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
		}
	}

	be_eqd_update(adapter);

	if (adapter->flags & BE_FLAGS_MAC_LEARNING_INITIALIZED)
		be_evict_mac(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

/* If any VFs are already enabled don't FLR the PF.
 * Returns true when a function-level reset should be performed at probe.
 */
static bool be_reset_required(struct be_adapter *adapter)
{
	/* idiom: direct boolean expression instead of `cond ? false : true` */
	return pci_num_vf(adapter->pdev) == 0;
}

/* Return a label for the highest link speed the PHY supports, or "" */
static char *port_speed_string(struct be_adapter *adapter)
{
	u16 speeds = adapter->phy.fixed_speeds_supported |
		     adapter->phy.auto_speeds_supported;

	if (speeds & BE_SUPPORTED_SPEED_40GBPS)
		return "40Gbps NIC ";
	if (speeds & BE_SUPPORTED_SPEED_10GBPS)
		return "10Gbps NIC ";
	if (speeds & BE_SUPPORTED_SPEED_1GBPS)
		return "1Gbps NIC ";
	return "";
}

/* Human-readable name of the multi-channel mode, or "" when not in one */
static char *mc_name(struct be_adapter *adapter)
{
	switch (adapter->mc_type) {
	case UMC:
		return "UMC";
	case FLEX10:
		return "FLEX 10";
	case vNIC1:
		return "vNIC-1";
	case nPAR:
		return "nPAR";
	case UFP:
		return "UFP";
	case vNIC2:
		return "vNIC-2";
	default:
		return "";
	}
}

/* "PF" for a physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}

/* PCI probe: bring up one NIC function. Enables the device, allocates the
 * netdev, configures DMA masks, maps BARs, syncs with FW, provisions all
 * resources (be_setup) and registers the net device. On failure, the goto
 * ladder unwinds every step in reverse order.
 */
static int be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* the adapter struct is the netdev's private area */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer a 64-bit DMA mask; fall back to 32-bit if unsupported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev,
						   DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* skip the FLR when VFs are already enabled (see be_reset_required) */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);
	
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);

	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	status = be_netdev_init(netdev);
	if (status)
		goto unsetup;

	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* In OSes that don't set the ASSIGNED flags, block
	 * unloading the driver to prevent kernel crashing
	 */
#ifndef PCI_FLAGS_ASSIGNED_defined
	if (sriov_enabled(adapter))
		try_module_get(THIS_MODULE);
#endif
	be_cmd_query_port_name(adapter, &port_name);

	be_roce_dev_add(adapter);
	/* start the periodic error-recovery worker */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	dev_info(&pdev->dev, "%s: %s \"%s\" %s %s port %c\n", nic_name(pdev),
		 port_speed_string(adapter), adapter->model_number,
		 func_name(adapter), mc_name(adapter), port_name);

//	be_sysfs_create_group(adapter);

#ifdef DEV_NETMAP
	be2net_netmap_attach(adapter);
#endif
	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

/* PM suspend: arm wake-on-LAN if enabled, quiesce interrupts and the
 * recovery worker, close the data path, tear down resources and put the
 * device into the requested power state. Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* be_close() must run under rtnl */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

/* PM resume: re-enable the device, wait for FW readiness, re-provision
 * resources and reopen the interface if it was running at suspend time.
 * Returns 0 on success or a negative errno.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* PF must wait for FW to be ready before issuing cmds */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			return status;
	}

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Fix: be_setup() failure was silently ignored; opening the netdev
	 * on a half-initialized adapter is unsafe.
	 */
	status = be_setup(adapter);
	if (status)
		return status;

	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * Shutdown handler. An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* the FLR stops all DMA from this function */
	be_cmd_reset_function(adapter);

	be_intr_set(adapter, false);
	be_irq_unregister(adapter);
	be_msix_disable(adapter);

	pci_disable_device(pdev);
}

/* AER/EEH error_detected callback: quiesce the adapter on first report,
 * then tell the PCI core whether a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* handle teardown only once per error episode */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Need to wait only once per adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

/* AER/EEH slot_reset callback: re-enable the device after a slot reset
 * and verify FW readiness before reporting recovery.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	/* drop the latched error flags so normal operation can resume */
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

/* AER/EEH resume callback: re-provision the function and reopen the
 * interface after a successful slot reset. Errors are only logged; the
 * device stays detached on failure.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* restart the periodic error-recovery worker */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

/* PCI AER/EEH recovery callbacks */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

/* PCI driver registration table */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

/* Module entry point: sanitize the rx_frag_size param and register the
 * PCI driver.
 */
static int __init be_init_module(void)
{
	bool frag_size_ok = rx_frag_size == 2048 || rx_frag_size == 4096 ||
			    rx_frag_size == 8192;

	if (!frag_size_ok) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

/* Module exit point: unregister the PCI driver */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);