/*
 * Copyright (C) 2016
 *
 * Brick Yang <printfxxx@163.com>
 *
 * This program is free software. You can redistribute it and/or
 * modify it as you like.
 */

/**
 * @file	dpaa.c
 * @brief	DPAA ethernet driver for simplebit
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/etherdevice.h>
#include <linux/of_mdio.h>
#include <linux/ethtool.h>
#include <linux/fsl_qman.h>
#include <linux/fsl_bman.h>
#include <linux/platform_device.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
#include <linux/dma-map-ops.h>
#endif

#include <mtrace.h>
#include <netdev.h>
#include <worker.h>
#include <cmd.h>

#include <qman_private.h>
#include <bman_private.h>
#undef TRUE
#undef FALSE
#include <dpaa_eth_common.h>
#include <mac.h>
#include <lnxwrp_fm.h>
#include <lnxwrp_fsl_fman.h>
#undef __ERR_MODULE__
#define __ERR_MODULE__	MODULE_FM_PORT
#include <fm_port.h>
#undef __ERR_MODULE__
#define __ERR_MODULE__	MODULE_FM_MAC
#ifdef QORIQ
#undef DEFAULT_exceptions
#undef GET_EXCEPTION_FLAG
#undef HASH_TABLE_SIZE
#include <dtsec.h>
#undef DEFAULT_exceptions
#undef GET_EXCEPTION_FLAG
#undef HASH_TABLE_SIZE
#include <tgec.h>
#endif	/* QORIQ */
#undef DEFAULT_exceptions
#undef GET_EXCEPTION_FLAG
#undef HASH_TABLE_SIZE
#include <memac.h>
#ifdef QORIQ
#include <fsl_fman_dtsec.h>
#include <fsl_fman_tgec.h>
#endif
#include <fsl_fman_memac.h>

#include "qman.h"
#include "bman.h"

#define DPAA_MAX_FRM_SZ		1536
#define DPAA_FRM_DATA_ALIGN	SMP_CACHE_BYTES
#define DPAA_DPA_BP_SZ		(DPAA_MAX_FRM_SZ + DPAA_FRM_DATA_ALIGN)

#define DPAA_FQ_TBL_HASH_BITS	14

#define dpaa_fq_table_for_each_entry(e, i, tmp)						\
	for (tmp = 0, e = &dpaa_fq_table[(i) & jhash_mask(DPAA_FQ_TBL_HASH_BITS)];	\
	     (tmp < jhash_size(DPAA_FQ_TBL_HASH_BITS)) || (e = NULL, 0);		\
	     tmp++, e = &dpaa_fq_table[((i) + tmp) & jhash_mask(DPAA_FQ_TBL_HASH_BITS)])

struct dpaa_fq;
typedef void (*qman_cb_dqrr_t)(struct qman_swp *, struct dpaa_fq *,
			       const struct qm_dqrr_entry __iomem *);

typedef struct dpaa_io_info {
	struct qman_swp *qm_swp;
	struct bman_swp *bm_swp;
	struct qm_portal_config *qp_cfg;
	struct bm_portal_config *bp_cfg;
} dpaa_io_info_t;

typedef struct dpaa_mac {
	int type;
#define FM_MAC_DTSEC	0
#define FM_MAC_XGEC	1
#define FM_MAC_MEMAC	2
	struct mac_device *mdev;
	atomic64_t rx_packets, rx_bytes;
} dpaa_mac_t;

typedef struct dpaa_ch {
	u16 chid;
} dpaa_ch_t;

typedef struct dpaa_bp {
	u8 bpid;
	size_t sz;
} dpaa_bp_t;

typedef struct dpaa_fq {
	u32 fqid;
	struct platform_device *pdev;
	qman_cb_dqrr_t dqrr;
}  dpaa_fq_t;

typedef struct dpaa_ndev {
	unsigned int flags;
#define DPAA_NDEV_F_PAUSE_AUTONEG	0x1u
	struct net_device *ndev;
	dpaa_mac_t *mac;
	dpaa_bp_t *rx_bp, *tx_clean_bp;
	dpaa_ch_t *ch;
	dpaa_fq_t *rx_fqs, *tx_fqs, *tx_conf_fqs,
		  *rx_err_fq, *tx_err_fq;
} dpaa_ndev_t;

typedef struct dpaa_fq_entry {
	atomic_t key_fqid;
	dpaa_fq_t *fq;
} dpaa_fq_entry_t;

static bool use_tx_conf = false;
static unsigned int skb_buf_nr;
static unsigned int poll_dqrr_budget = 32;
static dpaa_fq_entry_t dpaa_fq_table[jhash_size(DPAA_FQ_TBL_HASH_BITS)];

static DEFINE_PER_CPU(dpaa_io_info_t, cpu_dpaa_io_infos);

module_param_named(tx_conf, use_tx_conf, bool, S_IRUGO);
MODULE_PARM_DESC(tx_conf, "Use tx confirm");

module_param_named(bufs, skb_buf_nr, uint, S_IRUGO);
MODULE_PARM_DESC(bufs, "Number of skb buffers");

module_param_named(budget, poll_dqrr_budget, uint, S_IRUGO);
MODULE_PARM_DESC(budget, "Budget of poll dqrr");

static DEFINE_KSYM_PTR(qm_get_unused_portal);
static DEFINE_KSYM_PTR(qm_put_unused_portal);
static DEFINE_KSYM_PTR(bm_get_unused_portal);
static DEFINE_KSYM_PTR(bm_put_unused_portal);
static DEFINE_KSYM_PTR(FM_MAC_ResetCounters);
static DEFINE_KSYM_PTR(FM_MAC_GetStatistics);

static long cpu_qman_swp_create(void *arg)
{
	long rc;
	unsigned int cpu;
	dpaa_io_info_t *info;
	struct qman_swp *swp;
	struct qman_swp_desc *desc;

	cpu = smp_processor_id();
	info = per_cpu_ptr(&cpu_dpaa_io_infos, cpu);
	if (!(info->qp_cfg = CALL_KSYM_PTR(qm_get_unused_portal))) {
		pr_err("CPU %u: failed to get qman portal\n", cpu);
		rc = -ENODEV;
		goto err;
	}
	info->qp_cfg->public_cfg.is_shared = 0;
	info->qp_cfg->public_cfg.cpu = cpu;

	if (!(desc = kzalloc(sizeof(*desc), GFP_KERNEL)) || MTRACE_KMEM_ADD(desc)) {
		pr_err("%s(): failed to alloc memory\n", __func__);
		rc = -ENOMEM;
		goto err_alloc;
	}

	desc->cena_bar = info->qp_cfg->addr_virt[0];
	desc->cinh_bar = info->qp_cfg->addr_virt[1];
	desc->flags = SWP_F_EST | SWP_F_RE | SWP_F_EQCR_VB | SWP_F_DQRR_VB | SWP_F_MR_VB;

	if (!(swp = qman_swp_init(desc))) {
		pr_err("QM_SWP%u: failed to init qman sw portal\n", info->qp_cfg->public_cfg.index);
		rc = -EIO;
		goto err_qman_swp_init;
	}

	info->qm_swp = swp;
	return 0;

err_qman_swp_init:
	MTRACE_KMEM_DEL(desc);
	kfree(desc);
err_alloc:
	CALL_KSYM_PTR(qm_put_unused_portal, info->qp_cfg);
	info->qp_cfg = NULL;
err:
	return rc;
}

static long cpu_qman_swp_destroy(void *arg)
{
	int rc;
	dpaa_io_info_t *info;
	struct qman_swp *swp;
	const struct qm_mr_entry __iomem *mr;
	const struct qm_dqrr_entry __iomem *dq;
	const struct qman_swp_desc *desc;

	info = this_cpu_ptr(&cpu_dpaa_io_infos);
	swp = info->qm_swp;

	if (swp) {
		while ((dq = qman_swp_dqrr_next(swp))) {
			pr_warn("CPU %u: unexpected DQRR remained, verb=%x\n",
				info->qp_cfg->public_cfg.cpu, dq->verb & QM_DQRR_VERB_MASK);
			rc = qman_swp_dqrr_consume(swp, dq, true);
			BUG_ON(rc);
		}
		while ((mr = qman_swp_mr_next(swp))) {
			switch (mr->verb & QM_MR_VERB_TYPE_MASK) {
			case QM_MR_VERB_FQRN:
			case QM_MR_VERB_FQRNI:
			case QM_MR_VERB_FQRL:
			case QM_MR_VERB_FQPN:
				break;
			default:
				pr_warn("CPU %u: unexpected MR remained, verb=%x\n",
					info->qp_cfg->public_cfg.cpu, mr->verb & QM_MR_VERB_TYPE_MASK);
				break;
			}
			rc = qman_swp_mr_consume(swp, mr, true);
			BUG_ON(rc);
		}
		desc = swp->desc;
		qman_swp_finish(swp);
		MTRACE_KMEM_DEL(desc);
		kfree(desc);
	}

	if (info->qp_cfg) {
		CALL_KSYM_PTR(qm_put_unused_portal, info->qp_cfg);
		info->qp_cfg = NULL;
	}

	return 0;
}

static long cpu_bman_swp_create(void *arg)
{
	long rc;
	unsigned int cpu;
	dpaa_io_info_t *info;
	struct bman_swp *swp;
	struct bman_swp_desc *desc;

	cpu = smp_processor_id();
	info = per_cpu_ptr(&cpu_dpaa_io_infos, cpu);
	if (!(info->bp_cfg = CALL_KSYM_PTR(bm_get_unused_portal))) {
		pr_err("CPU %u: failed to get bman portal\n", cpu);
		rc = -ENODEV;
		goto err;
	}
	info->bp_cfg->public_cfg.is_shared = 0;
	info->bp_cfg->public_cfg.cpu = cpu;

	if (!(desc = kzalloc(sizeof(*desc), GFP_KERNEL)) || MTRACE_KMEM_ADD(desc)) {
		pr_err("%s(): failed to alloc memory\n", __func__);
		rc = -ENOMEM;
		goto err_alloc;
	}
	desc->cena_bar = info->bp_cfg->addr_virt[0];
	desc->cinh_bar = info->bp_cfg->addr_virt[1];

	if (!(swp = bman_swp_init(desc))) {
		pr_err("BM_SWP%u: failed to init bman sw portal\n", info->bp_cfg->public_cfg.index);
		rc = -EIO;
		goto err_bman_swp_init;
	}

	info->bm_swp = swp;
	return 0;

err_bman_swp_init:
	MTRACE_KMEM_DEL(desc);
	kfree(desc);
err_alloc:
	CALL_KSYM_PTR(bm_put_unused_portal, info->bp_cfg);
	info->bp_cfg = NULL;
err:
	return rc;
}

static long cpu_bman_swp_destroy(void *arg)
{
	dpaa_io_info_t *info;
	const struct bman_swp_desc *desc;

	info = this_cpu_ptr(&cpu_dpaa_io_infos);

	if (info->bm_swp) {
		desc = info->bm_swp->desc;
		bman_swp_finish(info->bm_swp);
		MTRACE_KMEM_DEL(desc);
		kfree(desc);
	}

	if (info->bp_cfg) {
		CALL_KSYM_PTR(bm_put_unused_portal, info->bp_cfg);
		info->bp_cfg = NULL;
	}

	return 0;
}

static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u8 cmd_verb;
	int rc;
	dpaa_fq_t *fq;
	dma_addr_t baddr;
	dpaa_ndev_t *netdev;
	struct qm_fd fd;
	unsigned int txq;
	struct qman_swp *swp;
	struct qm_eqcr_entry __iomem *eq;

	netdev = netdev_priv(ndev);
	swp = this_cpu_ptr(&cpu_dpaa_io_infos)->qm_swp;
	if (!(eq = qman_swp_eqcr_next(swp, &cmd_verb))) {
		goto err;
	}
	baddr = sb_skbh_to_baddr(skb->head);
	txq = skb_get_queue_mapping(skb);
	if (txq >= ndev->real_num_tx_queues) {
		txq = txq % ndev->real_num_tx_queues;
	}
	fq = &netdev->tx_fqs[txq];
	qman_swp_cmd_zero(eq);
	cmd_verb |= QM_EQCR_VERB_CMD_ENQUEUE;
	eq->fqid = cpu_to_be32(fq->fqid);
	fd.opaque_addr = 0;
	fd.opaque = 0;
	fd.addr = baddr;
	fd.offset = skb_headroom(skb);
	fd.length20 = skb->len;
	fd.bpid = use_tx_conf ? 0 : netdev->tx_clean_bp->bpid;
	fd.format = qm_fd_contig;
	eq->fd.opaque_addr = cpu_to_be64(fd.opaque_addr);
	eq->fd.opaque = cpu_to_be32(fd.opaque);
	rc = qman_swp_eqcr_submit(swp, eq, cmd_verb, true);
	BUG_ON(rc);

	return NETDEV_TX_OK;
err:
	return NETDEV_TX_BUSY;
}

static inline u64 dpaa_mac_cntr_update_return(atomic64_t *v, u64 val)
{
	u32 upper, lower;
	u64 old, new, cur;

	while (1) {
		old = atomic64_read(v);
		if (val >> 32) {
			new = val;
		} else {
			lower = (u32)val;
			upper = (old >> 32) + (lower < (u32)old ? 1 : 0);
			new = (u64)upper << 32 | lower;
		}
		cur = atomic64_cmpxchg(v, old, new);
		new = max(cur, new);
		if ((cur == old) || (cur == new)) {
			break;
		}
	}

	return new;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
static struct net_device_stats *dpaa_get_stats(struct net_device *ndev)
{
	u64 pkts, bytes;
	dpaa_mac_t *mac;
	dpaa_ndev_t *netdev;
	t_FmMacStatistics stat;
	struct mac_device *mdev;

	netdev = netdev_priv(ndev);
	mac = netdev->mac;
	mdev = mac->mdev;
	if (GET_ERROR_TYPE(CALL_KSYM_PTR(FM_MAC_GetStatistics,
					 mdev->get_mac_handle(mdev), &stat))) {
		goto err;
	}
	pkts = stat.ifInPkts;
	bytes = stat.ifInOctets;
	if (mac->type != FM_MAC_DTSEC) {
		pkts += stat.reStatPause + stat.ifInErrors;
	}
	ndev->stats.rx_packets = dpaa_mac_cntr_update_return(&mac->rx_packets, pkts);
	ndev->stats.rx_bytes = dpaa_mac_cntr_update_return(&mac->rx_bytes, bytes);
	goto ok;
err:
ok:
	return &ndev->stats;
}
#else
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
static struct rtnl_link_stats64 *
#else
static void
#endif
dpaa_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats)
{
	u64 pkts, bytes;
	dpaa_mac_t *mac;
	dpaa_ndev_t *netdev;
	t_FmMacStatistics stat;
	struct mac_device *mdev;

	netdev = netdev_priv(ndev);
	mac = netdev->mac;
	mdev = mac->mdev;
	if (GET_ERROR_TYPE(CALL_KSYM_PTR(FM_MAC_GetStatistics,
					 mdev->get_mac_handle(mdev), &stat))) {
		goto err;
	}
	pkts = stat.ifInPkts;
	bytes = stat.ifInOctets;
	if (mac->type != FM_MAC_DTSEC) {
		pkts += stat.reStatPause + stat.ifInErrors;
	}
	stats->rx_packets = dpaa_mac_cntr_update_return(&mac->rx_packets, pkts);
	stats->rx_bytes = dpaa_mac_cntr_update_return(&mac->rx_bytes, bytes);
	goto ok;
err:
ok:
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
	return stats;
#else
	return;
#endif
}
#endif

static struct net_device_ops dpaa_netdev_ops = {
	.ndo_start_xmit = dpaa_start_xmit,
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
	.ndo_get_stats = dpaa_get_stats,
#else
	.ndo_get_stats64 = dpaa_get_stats64,
#endif
};

static void dpaa_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pp)
{
	u8 fc;
	u16 lcladv;
	dpaa_ndev_t *netdev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
	struct ethtool_cmd cmd;
	if ((phy_ethtool_gset(ndev->phydev, &cmd))) {
		goto err;
	}
	lcladv = ethtool_adv_to_mii_adv_t(cmd.advertising);
#else
	struct ethtool_link_ksettings ks;
	phy_ethtool_ksettings_get(ndev->phydev, &ks);
	lcladv = linkmode_adv_to_mii_adv_t(ks.link_modes.advertising);
#endif
	netdev = netdev_priv(ndev);
	pp->autoneg = (netdev->flags & DPAA_NDEV_F_PAUSE_AUTONEG) ? 1 : 0;
	fc = mii_resolve_flowctrl_fdx(lcladv, lcladv);
	pp->rx_pause = fc & FLOW_CTRL_RX ? 1 : 0;
	pp->tx_pause = fc & FLOW_CTRL_TX ? 1 : 0;
	return;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
err:
	return;
#endif
}

static int dpaa_set_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pp)
{
	int rc, cap;
	u16 adv;
	dpaa_ndev_t *netdev;
	struct phy_device *phydev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
	struct ethtool_cmd cmd;
#else
	struct ethtool_link_ksettings ks;
#endif
	phydev = ndev->phydev;
	netdev = netdev_priv(ndev);
	netdev->flags = (netdev->flags & ~DPAA_NDEV_F_PAUSE_AUTONEG) |
			(pp->autoneg ? DPAA_NDEV_F_PAUSE_AUTONEG : 0);

	cap = (pp->rx_pause ? FLOW_CTRL_RX : 0) | (pp->tx_pause ? FLOW_CTRL_TX : 0);
	adv = mii_advertise_flowctrl(cap);
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
	if ((rc = phy_ethtool_gset(phydev, &cmd))) {
		goto err;
	}
	cmd.advertising = (cmd.advertising & ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause)) |
			  ((adv & ADVERTISE_PAUSE_CAP) ? ADVERTISED_Pause : 0) |
			  ((adv & ADVERTISE_PAUSE_ASYM) ? ADVERTISED_Asym_Pause : 0);
	if ((rc = phy_ethtool_sset(phydev, &cmd))) {
		goto err;
	}
#else
	phy_ethtool_ksettings_get(phydev, &ks);
	linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, ks.link_modes.advertising,
			 adv & ADVERTISE_PAUSE_CAP);
	linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, ks.link_modes.advertising,
			 adv & ADVERTISE_PAUSE_ASYM);
	if ((rc = phy_ethtool_ksettings_set(phydev, &ks))) {
		goto err;
	}
#endif
	return 0;
err:
	return rc;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
static int dpaa_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
	return phy_ethtool_gset(ndev->phydev, cmd);
}

static int dpaa_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
	return phy_ethtool_sset(ndev->phydev, cmd);
}

static struct ethtool_ops dpaa_ethtool_ops = {
	.get_pauseparam = dpaa_get_pauseparam,
	.set_pauseparam = dpaa_set_pauseparam,
	.get_settings   = dpaa_get_settings,
	.set_settings   = dpaa_set_settings,
};
#else
static int dpaa_get_link_ksettings(struct net_device *ndev, struct ethtool_link_ksettings *ks)
{
	phy_ethtool_ksettings_get(ndev->phydev, ks);

	return 0;
}

static int dpaa_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *ks)
{
	return phy_ethtool_ksettings_set(ndev->phydev, ks);
}

static struct ethtool_ops dpaa_ethtool_ops = {
	.get_pauseparam	    = dpaa_get_pauseparam,
	.set_pauseparam     = dpaa_set_pauseparam,
	.get_link_ksettings = dpaa_get_link_ksettings,
	.set_link_ksettings = dpaa_set_link_ksettings,
};
#endif

static void dpaa_pcpu_flow_poll(flow_t *flow)
{
	u8 cmd_verb;
	dpaa_ndev_t *netdev;
	unsigned int nr, num, to_clean;
	struct bman_swp *bm_swp;
	struct bm_mc_command *c;
	const struct bm_mc_result __iomem *r;

	netdev = netdev_priv(flow->netdev->ndev);
	bm_swp = this_cpu_ptr(&cpu_dpaa_io_infos)->bm_swp;

	to_clean = min(flow->q_used, flow->qwt);
	while (to_clean) {
		nr = min(to_clean, 8u);
		while (!(c = bman_swp_cr_next(bm_swp, &cmd_verb))) {
			continue;
		}
		cmd_verb |= BM_MCC_VERB_CMD_ACQUIRE | nr;
		c->acquire.bpid = netdev->tx_clean_bp->bpid;
		bman_swp_cr_submit(bm_swp, c, cmd_verb);
		while (!(r = bman_swp_rr_next(bm_swp, cmd_verb))) {
			continue;
		}
		if (WARN_ON((r->verb & BM_MCC_VERB_CMD_MASK) != BM_MCC_VERB_CMD_ACQUIRE)) {
			break;
		}
		num = r->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
		BUG_ON(num > nr);
		to_clean -= num;
		flow->q_used -= num;
		if (num < nr) {
			break;
		}
	}
}


static unsigned int dpaa_netdev_tx_burst(struct net_device *ndev, struct sk_buff **skbs, unsigned int nr)
{
	u8 cmd_verb;
	int rc;
	dpaa_fq_t *fq;
	dma_addr_t baddr;
	dpaa_ndev_t *netdev;
	struct qm_fd fd;
	unsigned int i, txq;
	struct sk_buff *skb;
	struct qman_swp *swp;
	struct qm_eqcr_entry __iomem *eq;

	netdev = netdev_priv(ndev);
	swp = this_cpu_ptr(&cpu_dpaa_io_infos)->qm_swp;
	for (i = 0; i < nr; i++) {
		if (!(eq = qman_swp_eqcr_next(swp, &cmd_verb))) {
			break;
		}
		skb = skbs[i];
		baddr = sb_skbh_to_baddr(skb->head);
		txq = skb_get_queue_mapping(skb);
		if (txq >= ndev->real_num_tx_queues) {
			txq = txq % ndev->real_num_tx_queues;
		}
		fq = &netdev->tx_fqs[txq];
		qman_swp_cmd_zero(eq);
		cmd_verb |= QM_EQCR_VERB_CMD_ENQUEUE;
		eq->fqid = cpu_to_be32(fq->fqid);
		fd.opaque_addr = 0;
		fd.opaque = 0;
		fd.addr = baddr;
		fd.offset = skb_headroom(skb);
		fd.length20 = skb->len;
		fd.bpid = use_tx_conf ? 0 : netdev->tx_clean_bp->bpid;
		fd.format = qm_fd_contig;
		eq->fd.opaque_addr = cpu_to_be64(fd.opaque_addr);
		eq->fd.opaque = cpu_to_be32(fd.opaque);
		rc = qman_swp_eqcr_submit(swp, eq, cmd_verb, false);
		BUG_ON(rc);
	}

	if (i) {
		rc = qman_swp_eqcr_submit(swp, NULL, 0, true);
		BUG_ON(rc);
	}

	return i;
}

static int dpaa_netdev_ioctl(struct net_device *ndev, proto_handle_t *handle, proto_rxd_t *rxd)
{
	int i, rc;
	uint64_t tx_pkts, rx_pkts;
	const char *arg_cmd;
	dpaa_ndev_t *netdev;
	t_FmMacStatistics stat;
	struct mac_device *mdev;
	struct fm_mac_dev *fm_mdev;

	if (!(arg_cmd = proto_get_str(&rxd->buf, &rxd->len))) {
		rc = -EINVAL;
		goto err;
	}

	netdev = netdev_priv(ndev);
	mdev = netdev->mac->mdev;
	fm_mdev = mdev->get_mac_handle(mdev);

	if (!strcmp(arg_cmd, "get_stats")) {
		if ((rc = -GET_ERROR_TYPE(CALL_KSYM_PTR(FM_MAC_GetStatistics, fm_mdev, &stat)))) {
			goto err;
		}
		tx_pkts = stat.ifOutPkts;
		rx_pkts = stat.ifInPkts;
		if (netdev->mac->type == FM_MAC_DTSEC) {
			rx_pkts += stat.reStatPause + stat.ifInErrors;
		}
		cmd_pr_info(handle, "<tx> pkts=%llu\n", tx_pkts);
		cmd_pr_info(handle, "<rx> pkts=%llu\n", rx_pkts);
		for (i = 0; i < sizeof(stat) / sizeof(u64); i++) {
			cmd_pr_info(handle, "stats[%d]=%llu\n", i, ((u64 *)&stat)[i]);
		}
	} else if (!strcmp(arg_cmd, "clr_stats")) {
		if ((rc = -GET_ERROR_TYPE(CALL_KSYM_PTR(FM_MAC_ResetCounters, fm_mdev)))) {
			goto err;
		}
	} else {
		cmd_pr_err(handle, "ERR: invalid command \"%s\"\n", arg_cmd);
		rc = -EINVAL;
	}

	return 0;
err:
	return rc;
}

static int dpaa_netdev_force_stop(struct net_device *ndev)
{
	u32 reg;
#ifdef QORIQ
	t_Dtsec *dtsec;
	t_Tgec *tgec;
#endif
	t_Memac *memac;
	dpaa_ndev_t *netdev;
	struct mac_device *mdev;
	struct fm_mac_dev *fm_mdev;

	netdev = netdev_priv(ndev);
	mdev = netdev->mac->mdev;
	fm_mdev = mdev->get_mac_handle(mdev);

	switch (netdev->mac->type) {
#ifdef QORIQ
	case FM_MAC_DTSEC:
		dtsec = (t_Dtsec *)fm_mdev;
		reg = ioread32be(&dtsec->p_MemMap->maccfg1);
		reg |= MACCFG1_LOOPBACK;
		iowrite32be(reg, &dtsec->p_MemMap->maccfg1);
		msleep(MSEC_PER_SEC);
		reg &= ~MACCFG1_LOOPBACK;
		iowrite32be(reg, &dtsec->p_MemMap->maccfg1);
		break;

	case FM_MAC_XGEC:
		tgec = (t_Tgec *)fm_mdev;
		reg = ioread32be(&tgec->p_MemMap->command_config);
		reg |= CMD_CFG_LOOPBACK_EN;
		iowrite32be(reg, &tgec->p_MemMap->command_config);
		msleep(MSEC_PER_SEC);
		reg &= ~CMD_CFG_LOOPBACK_EN;
		iowrite32be(reg, &tgec->p_MemMap->command_config);
		break;
#endif
	case FM_MAC_MEMAC:
		memac = (t_Memac *)fm_mdev;
		reg = ioread32be(&memac->p_MemMap->command_config);
		reg |= CMD_CFG_LOOPBACK_EN;
		iowrite32be(reg, &memac->p_MemMap->command_config);
		msleep(MSEC_PER_SEC);
		reg &= ~CMD_CFG_LOOPBACK_EN;
		iowrite32be(reg, &memac->p_MemMap->command_config);
		break;

	default:
		goto err;
	}

	return 0;
err:
	return -ENODEV;
}

static netdev_priv_ops_t dpaa_ndev_ops = {
	.pcpu_flow_poll    = dpaa_pcpu_flow_poll,
	.netdev_tx_burst   = dpaa_netdev_tx_burst,
	.netdev_ioctl      = dpaa_netdev_ioctl,
	.netdev_force_stop = dpaa_netdev_force_stop,
};

static int dpaa_netdev_name(struct platform_device *pdev, char *name)
{
	int rc;
	u32 fm_id, mac_id;
	struct device_node *dn;

	if (!(dn = of_parse_phandle(pdev->dev.of_node, "fsl,fman-mac", 0))) {
		rc = -ENODEV;
		goto err;
	}

	if (of_property_read_u32(dn, "cell-index", &mac_id)
	||  of_property_read_u32(dn->parent, "cell-index", &fm_id)) {
		rc = -EINVAL;
		goto err_prop;
	}

	mac_id++;
	fm_id++;
	snprintf(name, IFNAMSIZ, "fm%u-mac%u", fm_id, mac_id);
	of_node_put(dn);

	return 0;

err_prop:
	of_node_put(dn);
err:
	return rc;
}

static dpaa_mac_t *dpaa_mac_init(struct platform_device *pdev)
{
	int rc;
	dpaa_mac_t *mac;
	struct mac_device *mdev;
	struct device_node *dn;

	if (!(mac = kzalloc(sizeof(*mac), GFP_KERNEL)) || MTRACE_KMEM_ADD(mac)) {
		dev_err(&pdev->dev, "%s(): failed to alloc memory\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	if (IS_ERR(mdev = dpa_mac_probe(pdev))) {
		dev_err(&pdev->dev, "failed to probe mac\n");
		rc = PTR_ERR(mdev);
		goto err_probe;
	}
	mac->mdev = mdev;

	dn = dev_of_node(mdev->dev);
	if (of_device_is_compatible(dn, "fsl,fman-dtsec")
	||  of_device_is_compatible(dn, "fsl,fman-1g-mac")) {
		mac->type = FM_MAC_DTSEC;
	} else if (of_device_is_compatible(dn, "fsl,fman-xgec")
	||         of_device_is_compatible(dn, "fsl,fman-10g-mac")) {
		mac->type = FM_MAC_XGEC;
	} else if (of_device_is_compatible(dn, "fsl,fman-memac")) {
		mac->type = FM_MAC_MEMAC;
	} else {
		dev_err(&pdev->dev, "unsupported mac type\n");
		rc = -EINVAL;
		goto err_type;
	}

	if ((rc = fm_mac_set_promiscuous(mdev->get_mac_handle(mdev), true))) {
		dev_err(mdev->dev, "failed to enable promiscuous\n");
		goto err_prom;
	}

	return mac;

err_prom:
err_type:
	put_device(mdev->dev);
err_probe:
	MTRACE_KMEM_DEL(mac);
	kfree(mac);
err:
	return ERR_PTR(rc);
}

static void dpaa_mac_clean(struct platform_device *pdev, dpaa_mac_t *mac)
{
	put_device(mac->mdev->dev);
	MTRACE_KMEM_DEL(mac);
	kfree(mac);
}

static dpaa_ch_t *dpaa_ch_init(struct platform_device *pdev)
{
	int rc;
	u32 chid;
	dpaa_ch_t *ch;

	if (!(ch = kzalloc(sizeof(*ch), GFP_KERNEL)) || MTRACE_KMEM_ADD(ch)) {
		dev_err(&pdev->dev, "%s(): failed to alloc memory\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	if ((rc = qman_alloc_pool(&chid))) {
		dev_err(&pdev->dev, "failed to alloc pool\n");
		goto err_chid;
	}
	ch->chid = chid;

	return ch;

err_chid:
	MTRACE_KMEM_DEL(ch);
	kfree(ch);
err:
	return ERR_PTR(rc);
}

static void dpaa_ch_clean(struct platform_device *pdev, dpaa_ch_t *ch)
{
	qman_release_pool(ch->chid);
	MTRACE_KMEM_DEL(ch);
	kfree(ch);
}

static dpaa_bp_t *dpaa_bp_init(struct platform_device *pdev, size_t sz, unsigned int nr)
{
	u8 cmd_verb;
	int rc;
	u32 bpid;
	u64 bufs[8];
	dpaa_bp_t *bp;
	dma_addr_t baddr;
	unsigned int i, n, cnt, num;
	struct sk_buff *skb;
	struct bman_swp *swp;
	struct bm_buffer bm_buf;
	struct net_device *ndev;
	struct bm_mc_command *c;
	struct bm_rcr_entry *rcr;
	const struct bm_mc_result __iomem *r;

	if ((sz - DPAA_FRM_DATA_ALIGN) < dpa_get_max_frm()) {
		dev_err(&pdev->dev, "bp size smaller than maximum frame size\n");
		rc = -EINVAL;
		goto err;
	}

	if (!(bp = kzalloc(sizeof(*bp), GFP_KERNEL)) || MTRACE_KMEM_ADD(bp)) {
		dev_err(&pdev->dev, "%s(): failed to alloc memory\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	if ((rc = bman_alloc_bpid(&bpid))) {
		dev_err(&pdev->dev, "failed to alloc bpid\n");
		goto err_bpid;
	}
	bp->bpid = bpid;
	bp->sz = sz;

	swp = this_cpu_ptr(&cpu_dpaa_io_infos)->bm_swp;
	ndev = dev_get_drvdata(&pdev->dev);
	cnt = 0;
	n = 8;
	while (n) {
		while (!(c = bman_swp_cr_next(swp, &cmd_verb))) {
			continue;
		}
		cmd_verb |= BM_MCC_VERB_CMD_ACQUIRE | n;
		c->acquire.bpid = bp->bpid;
		bman_swp_cr_submit(swp, c, cmd_verb);
		while (!(r = bman_swp_rr_next(swp, cmd_verb))) {
			continue;
		}
		if (WARN_ON((r->verb & BM_MCC_VERB_CMD_MASK) != BM_MCC_VERB_CMD_ACQUIRE)) {
			break;
		}
		num = r->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
		BUG_ON(num > n);
		cnt += num;
		n = num ? : n / 2;
	}

	if (cnt) {
		dev_warn(&pdev->dev, "bp %u: drained %u bufs\n", bp->bpid, cnt);
	}

	cnt = 0;
	while (nr) {
		n = min(nr, 8u);
		for (i = 0; i < n; i++) {
			if (!(skb = sb_alloc_skb(ndev, sz, GFP_KERNEL | GFP_DMA))
			||  MTRACE_SB_SKB_ADD(skb)) {
				while (i--) {
					baddr = bufs[i];
					skb = sb_skbh_to_skb(sb_baddr_to_skbh(ndev, baddr));
					BUG_ON(!skb);
					MTRACE_SB_SKB_DEL(skb);
					sb_kfree_skb(skb);
				}
				rc = -ENOMEM;
				goto err_alloc;
			}
			baddr = sb_skbh_to_baddr(skb->head);
			bufs[i] = baddr;
		}
		while (!(rcr = bman_swp_rcr_next(swp, &cmd_verb))) {
			continue;
		}
		cmd_verb |= BM_RCR_VERB_CMD_BPID_SINGLE | n;
		bm_buf.opaque = 0;
		bm_buf.bpid = bp->bpid;
		for (i = 0; i < n; i++) {
			bm_buf.addr = bufs[i];
			rcr->bufs[i].opaque = cpu_to_be64(bm_buf.opaque);
		}
		rc = bman_swp_rcr_submit(swp, rcr, cmd_verb, (n == nr) ? true : false);
		BUG_ON(rc);
		nr -= n;
		cnt += n;
	}

	if (cnt) {
		dev_info(&pdev->dev, "bp %u: released %u bufs\n", bp->bpid, cnt);
	}

	return bp;

err_alloc:
	n = 8;
	while (n) {
		while (!(c = bman_swp_cr_next(swp, &cmd_verb))) {
			continue;
		}
		cmd_verb |= BM_MCC_VERB_CMD_ACQUIRE | n;
		c->acquire.bpid = bp->bpid;
		bman_swp_cr_submit(swp, c, cmd_verb);
		while (!(r = bman_swp_rr_next(swp, cmd_verb))) {
			continue;
		}
		if (WARN_ON((r->verb & BM_MCC_VERB_CMD_MASK) != BM_MCC_VERB_CMD_ACQUIRE)) {
			break;
		}
		num = r->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
		BUG_ON(num > n);
		for (i = 0; i < num; i++) {
			bm_buf.opaque = be64_to_cpu(r->acquire.bufs[i].opaque);
			baddr = bm_buf.addr;
			skb = sb_skbh_to_skb(sb_baddr_to_skbh(ndev, baddr));
			BUG_ON(!skb);
			MTRACE_SB_SKB_DEL(skb);
			sb_kfree_skb(skb);
		}
		n = num ? : n / 2;
	}
	bman_release_bpid(bp->bpid);
err_bpid:
	MTRACE_KMEM_DEL(bp);
	kfree(bp);
err:
	return ERR_PTR(rc);
}

static void dpaa_bp_clean(struct platform_device *pdev, dpaa_bp_t *bp, unsigned int nr)
{
	u8 cmd_verb;
	dma_addr_t baddr;
	unsigned int i, n, cnt, num;
	struct sk_buff *skb;
	struct bman_swp *swp;
	struct bm_buffer bm_buf;
	struct net_device *ndev;
	struct bm_mc_command *c;
	const struct bm_mc_result __iomem *r;

	swp = this_cpu_ptr(&cpu_dpaa_io_infos)->bm_swp;
	ndev = dev_get_drvdata(&pdev->dev);
	cnt = 0;
	n = 8;
	while (n) {
		while (!(c = bman_swp_cr_next(swp, &cmd_verb))) {
			continue;
		}
		cmd_verb |= BM_MCC_VERB_CMD_ACQUIRE | n;
		c->acquire.bpid = bp->bpid;
		bman_swp_cr_submit(swp, c, cmd_verb);
		while (!(r = bman_swp_rr_next(swp, cmd_verb))) {
			continue;
		}
		if (WARN_ON((r->verb & BM_MCC_VERB_CMD_MASK) != BM_MCC_VERB_CMD_ACQUIRE)) {
			break;
		}
		num = r->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
		BUG_ON(num > n);
		for (i = 0; i < num; i++) {
			bm_buf.opaque = be64_to_cpu(r->acquire.bufs[i].opaque);
			baddr = bm_buf.addr;
			skb = sb_skbh_to_skb(sb_baddr_to_skbh(ndev, baddr));
			BUG_ON(!skb);
			MTRACE_SB_SKB_DEL(skb);
			sb_kfree_skb(skb);
		}
		cnt += num;
		n = num ? : n / 2;
	}

	if (cnt) {
		dev_info(&pdev->dev, "bp %u: drained %u bufs\n", bp->bpid, cnt);
	}
	WARN_ON(cnt != nr);

	bman_release_bpid(bp->bpid);
	MTRACE_KMEM_DEL(bp);
	kfree(bp);
}

static void dpaa_rx_dqrr(struct qman_swp *swp, struct dpaa_fq *fq,
			 const struct qm_dqrr_entry __iomem *dq)
{

	u8 cmd_verb;
	int rc;
	dma_addr_t baddr;
	struct qm_fd fd;
	struct sk_buff *skb;
	struct bman_swp *bm_swp;
	struct bm_buffer bm_buf;
	struct net_device *ndev;
	struct bm_rcr_entry *rcr;

	ndev = dev_get_drvdata(&fq->pdev->dev);

	fd.opaque_addr = be64_to_cpu(dq->fd.opaque_addr);
	fd.opaque = be32_to_cpu(dq->fd.opaque);
	BUG_ON(fd.format != qm_fd_contig);
	bm_swp = this_cpu_ptr(&cpu_dpaa_io_infos)->bm_swp;
	baddr = fd.addr;
	skb = sb_skbh_to_skb(sb_baddr_to_skbh(ndev, baddr));
	BUG_ON(!skb);
	skb_reserve(skb, fd.offset);
	skb_put(skb, fd.length20);
	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);
	sb_netdev_receive(skb, ndev);

	BUG_ON(skb_shared(skb));
	skb_trim(skb, 0);
	skb_reserve(skb, -skb_headroom(skb));
	while (!(rcr = bman_swp_rcr_next(bm_swp, &cmd_verb))) {
		continue;
	}
	cmd_verb |= BM_RCR_VERB_CMD_BPID_SINGLE | 1;
	bm_buf.opaque = 0;
	bm_buf.bpid = fd.bpid;
	bm_buf.addr = baddr;
	rcr->bufs[0].opaque = cpu_to_be64(bm_buf.opaque);
	rc = bman_swp_rcr_submit(bm_swp, rcr, cmd_verb, true);
	BUG_ON(rc);
}

static void dpaa_rx_err_dqrr(struct qman_swp *swp, struct dpaa_fq *fq,
			     const struct qm_dqrr_entry __iomem *dq)
{
	u8 cmd_verb;
	int rc;
	struct qm_fd fd;
	struct bman_swp *bm_swp;
	struct bm_buffer bm_buf;
	struct bm_rcr_entry *rcr;

	fd.opaque_addr = be64_to_cpu(dq->fd.opaque_addr);
	fd.opaque = be32_to_cpu(dq->fd.opaque);
	BUG_ON(fd.format != qm_fd_contig);
	bm_swp = this_cpu_ptr(&cpu_dpaa_io_infos)->bm_swp;
	while (!(rcr = bman_swp_rcr_next(bm_swp, &cmd_verb))) {
		continue;
	}
	cmd_verb |= BM_RCR_VERB_CMD_BPID_SINGLE | 1;
	bm_buf.opaque = 0;
	bm_buf.bpid = fd.bpid;
	bm_buf.addr = fd.addr;
	rcr->bufs[0].opaque = cpu_to_be64(bm_buf.opaque);
	rc = bman_swp_rcr_submit(bm_swp, rcr, cmd_verb, true);
	BUG_ON(rc);
}

static void dpaa_tx_err_dqrr(struct qman_swp *swp, struct dpaa_fq *fq,
			     const struct qm_dqrr_entry __iomem *dq)
{
	dma_addr_t baddr;
	struct qm_fd fd;
	struct sk_buff *skb;
	struct net_device *ndev;

	ndev = dev_get_drvdata(&fq->pdev->dev);

	fd.opaque_addr = be64_to_cpu(dq->fd.opaque_addr);
	fd.opaque = be32_to_cpu(dq->fd.opaque);
	baddr = fd.addr;
	skb = sb_skbh_to_skb(sb_baddr_to_skbh(ndev, baddr));
	BUG_ON(!skb);
	sb_kfree_skb(skb);
}

static void dpaa_tx_conf_dqrr(struct qman_swp *swp, struct dpaa_fq *fq,
			      const struct qm_dqrr_entry __iomem *dq)
{
	dma_addr_t baddr;
	struct qm_fd fd;
	struct sk_buff *skb;
	struct net_device *ndev;

	ndev = dev_get_drvdata(&fq->pdev->dev);

	fd.opaque_addr = be64_to_cpu(dq->fd.opaque_addr);
	fd.opaque = be32_to_cpu(dq->fd.opaque);
	baddr = fd.addr;
	skb = sb_skbh_to_skb(sb_baddr_to_skbh(ndev, baddr));
	BUG_ON(!skb);
	sb_kfree_skb(skb);
}

#define FMAN_CONTEXTA_OVFQ	0x80000000
#define FMAN_CONTEXTA_A2V	0x10000000
#define FMAN_CONTEXTA_OVOM	0x02000000
#define FMAN_CONTEXTA_EBD	0x80000000

static dpaa_fq_t *dpaa_fqs_init(struct platform_device *pdev, struct qman_swp *swp,
				u32 count, u32 conf, u16 channel, qman_cb_dqrr_t dqrr)
{
	u8 cmd_verb;
	u32 fqid;
	dpaa_fq_t *fqs;
	unsigned int i, j, tmp;
	struct qm_fqd fqd;
	dpaa_fq_entry_t *e;
	struct qm_mc_command *c;
	const struct qm_mc_result __iomem *r;

	if (!(fqs = kzalloc(sizeof(*fqs) * count, GFP_KERNEL)) || MTRACE_KMEM_ADD(fqs)) {
		dev_err(&pdev->dev, "%s(): failed to alloc memory\n", __func__);
		goto err;
	}

	if ((qman_alloc_fqid_range(&fqid, count, count, 0)) < count) {
		dev_err(&pdev->dev, "failed to alloc %u fqids\n", count);
		goto err_fqid;
	}

	for (i = 0; i < count; i++) {
		fqs[i].fqid = fqid + i;
		fqs[i].pdev = pdev;
		fqs[i].dqrr = dqrr;
		j = jhash(&fqs[i].fqid, sizeof(fqs[i].fqid), 0);
		dpaa_fq_table_for_each_entry(e, j, tmp) {
			if (atomic_cmpxchg_acquire(&e->key_fqid, 0, (long)fqs[i].fqid)) {
				continue;
			}
			e->fq = &fqs[i];
			break;
		}
		if (!e) {
			dev_err(&pdev->dev, "failed to alloc entries in dpaa_fq table\n");
			goto err_init;
		}
		while (!(c = qman_swp_cr_next(swp, &cmd_verb))) {
			continue;
		}
		cmd_verb |= QM_MCC_VERB_INITFQ_SCHED;
		c->initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA);
		c->initfq.fqid = cpu_to_be32(fqs[i].fqid);
		c->initfq.count = cpu_to_be16(0);
		if (dqrr) {
			fqd.dest.channel = channel;
			fqd.dest.wq = 4;
			fqd.context_b = 0;
			fqd.context_a.hi = 0;
			fqd.context_a.lo = 0;
			c->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_FQCTRL);
			c->initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_AVOIDBLOCK);
		} else {
			fqd.dest.channel = channel;
			fqd.dest.wq = 3;
			if (!conf) {
				fqd.context_b = 0;
				fqd.context_a.hi = FMAN_CONTEXTA_OVFQ | FMAN_CONTEXTA_A2V | FMAN_CONTEXTA_OVOM;
				fqd.context_a.lo = FMAN_CONTEXTA_EBD;
			} else {
				fqd.context_b = conf + i;
				fqd.context_a.hi = FMAN_CONTEXTA_OVFQ;
				fqd.context_a.lo = 0;
			}
		}
		c->initfq.fqd.dest_wq = cpu_to_be16(fqd.dest_wq);
		c->initfq.fqd.context_b = cpu_to_be32(fqd.context_b);
		c->initfq.fqd.context_a.opaque = cpu_to_be64(fqd.context_a.opaque);
		qman_swp_cr_submit(swp, c, cmd_verb);
		while (!(r = qman_swp_rr_next(swp, cmd_verb))) {
			continue;
		}
		if (r->result != QM_MCR_RESULT_OK) {
			dev_err(&pdev->dev, "failed to init fq %u\n", fqs[i].fqid);
			e->fq = NULL;
			atomic_set_release(&e->key_fqid, 0);
			goto err_init;
		}
	}

	return fqs;

err_init:
	while (i--) {
		while (!(c = qman_swp_cr_next(swp, &cmd_verb))) {
			continue;
		}
		cmd_verb |= QM_MCC_VERB_ALTER_RETIRE;
		c->alterfq.fqid = cpu_to_be32(fqs[i].fqid);
		qman_swp_cr_submit(swp, c, cmd_verb);
		while (!(r = qman_swp_rr_next(swp, cmd_verb))) {
			continue;
		}
		WARN_ON(r->result != QM_MCR_RESULT_OK);
		while (!(c = qman_swp_cr_next(swp, &cmd_verb))) {
			continue;
		}
		cmd_verb |= QM_MCC_VERB_ALTER_OOS;
		c->alterfq.fqid = cpu_to_be32(fqs[i].fqid);
		qman_swp_cr_submit(swp, c, cmd_verb);
		while (!(r = qman_swp_rr_next(swp, cmd_verb))) {
			continue;
		}
		WARN_ON(r->result != QM_MCR_RESULT_OK);
		j = jhash(&fqs[i].fqid, sizeof(fqs[i].fqid), 0);
		dpaa_fq_table_for_each_entry(e, j, tmp) {
			if (atomic_read_acquire(&e->key_fqid) != (long)fqs[i].fqid) {
				continue;
			}
			e->fq = NULL;
			atomic_set_release(&e->key_fqid, 0);
			break;
		}
		BUG_ON(!e);
	}
	qman_release_fqid_range(fqid, count);
err_fqid:
	MTRACE_KMEM_DEL(fqs);
	kfree(fqs);
err:
	return NULL;
}

static void dpaa_fqs_clean(struct platform_device *pdev, struct qman_swp *swp,
			   dpaa_fq_t *fqs, u32 count)
{
	u8 cmd_verb;
	unsigned int i, j, tmp;
	dpaa_fq_entry_t *e;
	struct qm_mc_command *c;
	const struct qm_mc_result __iomem *r;

	for (i = 0; i < count; i++) {
		while (!(c = qman_swp_cr_next(swp, &cmd_verb))) {
			continue;
		}
		cmd_verb |= QM_MCC_VERB_ALTER_RETIRE;
		c->alterfq.fqid = cpu_to_be32(fqs[i].fqid);
		qman_swp_cr_submit(swp, c, cmd_verb);
		while (!(r = qman_swp_rr_next(swp, cmd_verb))) {
			continue;
		}
		WARN_ON(r->result != QM_MCR_RESULT_OK);
		while (!(c = qman_swp_cr_next(swp, &cmd_verb))) {
			continue;
		}
		cmd_verb |= QM_MCC_VERB_ALTER_OOS;
		c->alterfq.fqid = cpu_to_be32(fqs[i].fqid);
		qman_swp_cr_submit(swp, c, cmd_verb);
		while (!(r = qman_swp_rr_next(swp, cmd_verb))) {
			continue;
		}
		WARN_ON(r->result != QM_MCR_RESULT_OK);
		j = jhash(&fqs[i].fqid, sizeof(fqs[i].fqid), 0);
		dpaa_fq_table_for_each_entry(e, j, tmp) {
			if (atomic_read_acquire(&e->key_fqid) != (long)fqs[i].fqid) {
				continue;
			}
			e->fq = NULL;
			atomic_set_release(&e->key_fqid, 0);
			break;
		}
		BUG_ON(!e);
	}

	qman_release_fqid_range(fqs[0].fqid, count);
	MTRACE_KMEM_DEL(fqs);
	kfree(fqs);
}

static int dpaa_netdev_fqs_init(dpaa_ndev_t *netdev)
{
	int rc;
	struct qman_swp *swp;
	struct net_device *ndev;
	struct platform_device *pdev;

	ndev = netdev->ndev;
	pdev = to_platform_device(ndev->dev.parent);
	swp = this_cpu_ptr(&cpu_dpaa_io_infos)->qm_swp;

	if (IS_ERR(netdev->rx_fqs = dpaa_fqs_init(pdev, swp, ndev->real_num_rx_queues,
						  0, netdev->ch->chid, dpaa_rx_dqrr))) {
		dev_err(&pdev->dev, "failed to init rx_fqs\n");
		rc = PTR_ERR(netdev->rx_fqs);
		goto err;
	}
	dev_dbg(&pdev->dev, "rx_fqs: fqids=%u:%u\n", netdev->rx_fqs[0].fqid, ndev->real_num_rx_queues);

	if (IS_ERR(netdev->rx_err_fq = dpaa_fqs_init(pdev, swp, 1, 0,
						     netdev->ch->chid, dpaa_rx_err_dqrr))) {
		dev_err(&pdev->dev, "failed to init rx_err_fq\n");
		rc = PTR_ERR(netdev->rx_err_fq);
		goto err_rx_err_fq;
	}
	dev_dbg(&pdev->dev, "rx_err_fq: fqid=%u\n", netdev->rx_err_fq->fqid);

	if (IS_ERR(netdev->tx_conf_fqs = dpaa_fqs_init(pdev, swp, ndev->real_num_tx_queues,
						       0, netdev->ch->chid, dpaa_tx_conf_dqrr))) {
		dev_err(&pdev->dev, "failed to init tx_conf_fqs\n");
		rc = PTR_ERR(netdev->tx_conf_fqs);
		goto err_tx_conf_fqs;
	}
	dev_dbg(&pdev->dev, "tx_conf_fqs: fqids=%u:%u\n", netdev->tx_conf_fqs[0].fqid, ndev->real_num_tx_queues);

	if (IS_ERR(netdev->tx_fqs = dpaa_fqs_init(pdev, swp, ndev->real_num_tx_queues,
						  use_tx_conf ? netdev->tx_conf_fqs[0].fqid : 0,
						  fm_get_tx_port_channel(netdev->mac->mdev->port_dev[TX]), NULL))) {
		dev_err(&pdev->dev, "failed to init tx_fqs\n");
		rc = PTR_ERR(netdev->tx_fqs);
		goto err_tx_fqs;
	}
	dev_dbg(&pdev->dev, "tx_fqs: fqids=%u:%u\n", netdev->tx_fqs[0].fqid, ndev->real_num_tx_queues);

	if (IS_ERR(netdev->tx_err_fq = dpaa_fqs_init(pdev, swp, 1, 0, netdev->ch->chid, dpaa_tx_err_dqrr))) {
		dev_err(&pdev->dev, "failed to init tx_err_fq\n");
		rc = PTR_ERR(netdev->tx_err_fq);
		goto err_tx_err_fq;
	}
	dev_dbg(&pdev->dev, "tx_err_fq: fqid=%u\n", netdev->tx_err_fq->fqid);

	return 0;

err_tx_err_fq:
	dpaa_fqs_clean(pdev, swp, netdev->tx_fqs, ndev->real_num_tx_queues);
err_tx_fqs:
	dpaa_fqs_clean(pdev, swp, netdev->tx_conf_fqs, ndev->real_num_tx_queues);
err_tx_conf_fqs:
	dpaa_fqs_clean(pdev, swp, netdev->rx_err_fq, 1);
err_rx_err_fq:
	dpaa_fqs_clean(pdev, swp, netdev->rx_fqs, ndev->real_num_rx_queues);
err:
	return rc;
}

static void dpaa_netdev_fqs_clean(dpaa_ndev_t *netdev)
{
	struct qman_swp *swp;
	struct net_device *ndev;
	struct platform_device *pdev;

	ndev = netdev->ndev;
	pdev = to_platform_device(ndev->dev.parent);
	swp = this_cpu_ptr(&cpu_dpaa_io_infos)->qm_swp;

	dpaa_fqs_clean(pdev, swp, netdev->tx_err_fq, 1);
	dpaa_fqs_clean(pdev, swp, netdev->tx_fqs, ndev->real_num_tx_queues);
	dpaa_fqs_clean(pdev, swp, netdev->tx_conf_fqs, ndev->real_num_tx_queues);
	dpaa_fqs_clean(pdev, swp, netdev->rx_err_fq, 1);
	dpaa_fqs_clean(pdev, swp, netdev->rx_fqs, ndev->real_num_rx_queues);
}

static void dpaa_netdev_ports_init(dpaa_ndev_t *netdev)
{
#define FMBM_FQID	GENMASK(23, 0)
#define FMBM_NIA	GENMASK(23, 0)
#define ICP_ICEOF	GENMASK(20, 16)
#define ICP_ICIOF	GENMASK(11, 8)
#define ICP_ICSZ	GENMASK(4, 0)
#define REBM_SGD	GENMASK(31, 31)
#define REBM_BSM	GENMASK(24, 16)
#define REBM_BEM	GENMASK(8, 0)
#define REBMPI_BPID	GENMASK(21, 16)
#define REBMPI_PBS	GENMASK(15, 0)
	u32 reg;
	t_FmPort *port;
	struct mac_device *mdev;
	struct fm_port_params params;
	struct fman_port_tx_bmi_regs *tx_bmi;
	struct fman_port_rx_bmi_regs *rx_bmi;

	mdev = netdev->mac->mdev;

	memset(&params, 0, sizeof(params));
	params.errq = netdev->tx_err_fq->fqid;
	params.defq = netdev->tx_conf_fqs[0].fqid;
	params.data_align = DPAA_FRM_DATA_ALIGN;
	fm_set_tx_port_params(mdev->port_dev[TX], &params);
	port = (t_FmPort *)((t_LnxWrpFmPortDev *)mdev->port_dev[TX])->h_Dev;
	BUG_ON(!port);
	tx_bmi = (struct fman_port_tx_bmi_regs *)port->port.bmi_regs;

	reg = ioread32be(&tx_bmi->fmbm_tefqid) & ~FMBM_FQID;
	reg |= FIELD_PREP(FMBM_FQID, params.errq);
	iowrite32be(reg, &tx_bmi->fmbm_tefqid);

	reg = ioread32be(&tx_bmi->fmbm_tcfqid) & ~FMBM_FQID;
	reg |= FIELD_PREP(FMBM_FQID, params.defq);
	iowrite32be(reg, &tx_bmi->fmbm_tcfqid);

	reg = ioread32be(&tx_bmi->fmbm_ticp) & ~(ICP_ICEOF | ICP_ICIOF | ICP_ICSZ);
	iowrite32be(reg, &tx_bmi->fmbm_ticp);

	memset(&params, 0, sizeof(params));
	params.errq = netdev->rx_err_fq->fqid;
	params.defq = netdev->rx_fqs[0].fqid;
	params.num_pools = 1;
	params.pool_param[0].id = netdev->rx_bp->bpid;
	params.pool_param[0].size = netdev->rx_bp->sz;
	params.data_align = DPAA_FRM_DATA_ALIGN;
	fm_set_rx_port_params(mdev->port_dev[RX], &params);
	port = (t_FmPort *)((t_LnxWrpFmPortDev *)mdev->port_dev[RX])->h_Dev;
	BUG_ON(!port);
	rx_bmi = (struct fman_port_rx_bmi_regs *)port->port.bmi_regs;

	reg = ioread32be(&rx_bmi->fmbm_refqid) & ~FMBM_FQID;
	reg |= FIELD_PREP(FMBM_FQID, params.errq);
	iowrite32be(reg, &rx_bmi->fmbm_refqid);

	reg = ioread32be(&rx_bmi->fmbm_rfqid) & ~FMBM_FQID;
	reg |= FIELD_PREP(FMBM_FQID, params.defq);
	iowrite32be(reg, &rx_bmi->fmbm_rfqid);

	reg = ioread32be(&rx_bmi->fmbm_rfne) & ~FMBM_NIA;
	reg |= FIELD_PREP(FMBM_NIA, NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME);
	iowrite32be(reg, &rx_bmi->fmbm_rfne);

	reg = ioread32be(&rx_bmi->fmbm_ricp) & ~(ICP_ICEOF | ICP_ICIOF | ICP_ICSZ);
	iowrite32be(reg, &rx_bmi->fmbm_ricp);

	reg = ioread32be(&rx_bmi->fmbm_rebm) & ~(REBM_BSM | REBM_BEM);
	reg |= FIELD_PREP(REBM_SGD, 1) | FIELD_PREP(REBM_BSM, DPAA_FRM_DATA_ALIGN);
	iowrite32be(reg, &rx_bmi->fmbm_rebm);

	reg = ioread32be(&rx_bmi->fmbm_ebmpi[0]) & ~(REBMPI_BPID | REBMPI_PBS);
	reg |= FIELD_PREP(REBMPI_BPID, params.pool_param[0].id) |
	       FIELD_PREP(REBMPI_PBS, params.pool_param[0].size);
	iowrite32be(reg, &rx_bmi->fmbm_ebmpi[0]);
}

static long dpaa_netdev_init(void *arg)
{
	long rc;
	char name[IFNAMSIZ];
	dpaa_ndev_t *netdev;
	struct net_device *ndev = NULL;
	struct platform_device *pdev = arg;

	if ((rc = dpaa_netdev_name(pdev, name))) {
		goto err;
	}

	if (!sb_netdev_in_filter(name)) {
		rc = -ENODEV;
		goto err;
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 9, 0)
	arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, true);
#else
	arch_setup_dma_ops(&pdev->dev, true);
#endif
	if ((rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))) {
		dev_err(&pdev->dev, "failed to set dma mask\n");
		goto err;
	}

	if (!(ndev = sb_netdev_alloc(sizeof(*netdev), nr_cpu_ids, 1))) {
		dev_err(&pdev->dev, "%s(): failed to alloc memory\n", __func__);
		rc = -ENOMEM;
		goto err;
	}
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->netdev_ops = &dpaa_netdev_ops;
	ndev->ethtool_ops = &dpaa_ethtool_ops;
	dev_set_drvdata(&pdev->dev, ndev);
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0)
	ndev->features |= NETIF_F_LLTX;
#else
	ndev->lltx = true;
#endif
	ndev->priv_flags |= IFF_TX_SKB_SHARING;
	strncpy(ndev->name, name, sizeof(name));
	netdev = netdev_priv(ndev);
	netdev->ndev = ndev;
	netdev->flags |= DPAA_NDEV_F_PAUSE_AUTONEG;

	if (IS_ERR(netdev->mac = dpaa_mac_init(pdev))) {
		dev_err(&pdev->dev, "failed to init mac\n");
		rc = PTR_ERR(netdev->mac);
		goto err_mac_init;
	}

	if (IS_ERR(netdev->ch = dpaa_ch_init(pdev))) {
		dev_err(&pdev->dev, "failed to init ch\n");
		rc = PTR_ERR(netdev->ch);
		goto err_ch_init;
	}
	dev_dbg(&pdev->dev, "ch: chid=%u\n", netdev->ch->chid);

	if (IS_ERR(netdev->rx_bp = dpaa_bp_init(pdev, DPAA_DPA_BP_SZ, skb_buf_nr))) {
		dev_err(&pdev->dev, "failed to init rx_bp\n");
		rc = PTR_ERR(netdev->rx_bp);
		goto err_rx_bp_init;
	}
	dev_dbg(&pdev->dev, "rx_bp: bpid=%u\n", netdev->rx_bp->bpid);

	if (IS_ERR(netdev->tx_clean_bp = dpaa_bp_init(pdev, 0, 0))) {
		dev_err(&pdev->dev, "failed to init tx_clean_bp\n");
		rc = PTR_ERR(netdev->tx_clean_bp);
		goto err_tx_clean_bp_init;
	}
	dev_dbg(&pdev->dev, "tx_clean_bp: bpid=%u\n", netdev->tx_clean_bp->bpid);

	if ((rc = dpaa_netdev_fqs_init(netdev))) {
		dev_err(&pdev->dev, "failed to init fqs\n");
		goto err_fqs_init;
	}

	dpaa_netdev_ports_init(netdev);

	if ((rc = sb_netdev_register(ndev))) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_reg;
	}

	return 0;

err_reg:
	dpaa_netdev_fqs_clean(netdev);
err_fqs_init:
	dpaa_bp_clean(pdev, netdev->tx_clean_bp, 0);
err_tx_clean_bp_init:
	dpaa_bp_clean(pdev, netdev->rx_bp, skb_buf_nr);
err_rx_bp_init:
	dpaa_ch_clean(pdev, netdev->ch);
err_ch_init:
	dpaa_mac_clean(pdev, netdev->mac);
err_mac_init:
	sb_netdev_free(ndev);
err:
	return rc;
}

static long dpaa_netdev_clean(void *arg)
{
	dpaa_ndev_t *netdev;
	struct net_device *ndev;
	struct platform_device *pdev = arg;

	ndev = dev_get_drvdata(&pdev->dev);
	netdev = netdev_priv(ndev);

	sb_netdev_unregister(ndev);

	dpaa_netdev_fqs_clean(netdev);

	dpaa_bp_clean(pdev, netdev->tx_clean_bp, 0);

	dpaa_bp_clean(pdev, netdev->rx_bp, skb_buf_nr);

	dpaa_ch_clean(pdev, netdev->ch);

	dpaa_mac_clean(pdev, netdev->mac);

	sb_netdev_free(ndev);

	return 0;
}

static long dpaa_netdev_bind(void *arg)
{
	dpaa_ndev_t *netdev = arg;
	unsigned int cpu;
	dpaa_io_info_t *info;
	struct platform_device *pdev;

	cpu = smp_processor_id();
	info = per_cpu_ptr(&cpu_dpaa_io_infos, cpu);
	pdev = to_platform_device(netdev->ndev->dev.parent);
	qman_swp_push_set(info->qm_swp, qman_swp_push_chid_to_idx(netdev->ch->chid), true);
	dev_dbg(&pdev->dev, "bind to cpu %u\n", cpu);

	return 0;
}

static long dpaa_netdev_unbind(void *arg)
{
	dpaa_ndev_t *netdev = arg;
	unsigned int cpu;
	dpaa_io_info_t *info;
	struct platform_device *pdev;

	cpu = smp_processor_id();
	info = per_cpu_ptr(&cpu_dpaa_io_infos, cpu);
	pdev = to_platform_device(netdev->ndev->dev.parent);
	qman_swp_push_set(info->qm_swp, qman_swp_push_chid_to_idx(netdev->ch->chid), false);
	dev_dbg(&pdev->dev, "unbind from cpu %u\n", cpu);

	return 0;
}

static void dpaa_netdev_adjust_link(struct net_device *ndev)
{
	u8 fc;
	u16 lcladv, rmtadv;
	dpaa_ndev_t *netdev;
	struct phy_device *phydev;
	struct mac_device *mdev;
	struct fm_mac_dev *fm_mdev;

	phydev = ndev->phydev;
	netdev = netdev_priv(ndev);
	mdev = netdev->mac->mdev;
	fm_mdev = mdev->get_mac_handle(mdev);

	phy_print_status(phydev);

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
	lcladv = ethtool_adv_to_mii_adv_t(phydev->advertising);
	rmtadv = ethtool_adv_to_mii_adv_t(phydev->lp_advertising);
#else
	lcladv = linkmode_adv_to_mii_adv_t(phydev->advertising);
	rmtadv = linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
#endif
	if ((phydev->autoneg == AUTONEG_ENABLE) &&
	    (netdev->flags & DPAA_NDEV_F_PAUSE_AUTONEG)) {
		fc = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else {
		fc = mii_resolve_flowctrl_fdx(lcladv, lcladv);
	}

	if (fm_mac_adjust_link(fm_mdev, phydev->link ? true : false,
			       (phydev->link && (phydev->speed != SPEED_UNKNOWN)) ? phydev->speed : 0,
			       phydev->duplex == DUPLEX_FULL ? true : false)) {
		dev_err(mdev->dev, "failed to adjust mac link\n");
		goto err;
	}

	if (fm_mac_set_rx_pause_frames(fm_mdev, (fc & FLOW_CTRL_RX) ? true : false)) {
		dev_err(mdev->dev, "failed to set rx pause\n");
		goto err;
	}

	if (fm_mac_set_tx_pause_frames(fm_mdev, (fc & FLOW_CTRL_TX) ? true : false)) {
		dev_err(mdev->dev, "failed to set tx pause\n");
		goto err;
	}

	return;
err:
	return;
}

static int dpaa_netdev_phy_init(dpaa_ndev_t *netdev)
{
	int rc;
	struct mac_device *mdev;
	struct phy_device *phydev;

	mdev = netdev->mac->mdev;

	if (!(phydev = of_phy_connect(netdev->ndev, mdev->phy_node,
				      dpaa_netdev_adjust_link, 0, mdev->phy_if))) {
		dev_err(&netdev->ndev->dev, "failed to connect to phy\n");
		rc = -ENODEV;
		goto err;
	}

	return 0;
err:
	return rc;
}

static void dpaa_netdev_phy_clean(dpaa_ndev_t *netdev)
{
	phy_disconnect(netdev->ndev->phydev);
}

static int dpaa_netdev_start(dpaa_ndev_t *netdev)
{
	int i, rc;
	struct mac_device *mdev;

	mdev = netdev->mac->mdev;

	for_each_port_device(i, mdev->port_dev) {
		if ((rc = fm_port_enable(mdev->port_dev[i]))) {
			dev_err(mdev->dev, "failed to enable port %d\n", i);
			goto err;
		}
	}

	if ((rc = fm_mac_enable(mdev->get_mac_handle(mdev)))) {
		dev_err(mdev->dev, "failed to enable mac\n");
		goto err;
	}

	phy_start(netdev->ndev->phydev);

	return 0;
err:
	for_each_port_device(i, mdev->port_dev) {
		fm_port_disable(mdev->port_dev[i]);
	}
	return rc;
}

static void dpaa_netdev_stop(dpaa_ndev_t *netdev)
{
	int i;
	struct mac_device *mdev;

	mdev = netdev->mac->mdev;

	phy_stop(netdev->ndev->phydev);

	fm_mac_disable(mdev->get_mac_handle(mdev));

	for_each_port_device(i, mdev->port_dev) {
		fm_port_disable(mdev->port_dev[i]);
	}
}

static int dpaa_netdev_probe(struct platform_device *pdev)
{
	int rc;
	unsigned int cpu, last;
	dpaa_ndev_t *netdev;
	struct net_device *ndev;

	cpu = cpumask_first(worker_cpumask);
	if ((rc = work_on_cpu(cpu, dpaa_netdev_init, pdev))) {
		goto err;
	}

	ndev = dev_get_drvdata(&pdev->dev);
	netdev = netdev_priv(ndev);
	last = nr_cpu_ids;
	for_each_cpu(cpu, worker_cpumask) {
		if ((rc = work_on_cpu(cpu, dpaa_netdev_bind, netdev))) {
			last = cpu;
			goto err_bind;
		}
	}

	if ((rc = dpaa_netdev_phy_init(netdev))) {
		dev_err(&ndev->dev, "failed to init phy\n");
		goto err_phy_init;
	}

	if ((rc = dpaa_netdev_start(netdev))) {
		dev_err(&pdev->dev, "failed to start netdev\n");
		goto err_start;
	}

	if ((rc = sb_netdev_notify(ndev, NETDEV_UP, &dpaa_ndev_ops))) {
		dev_err(&pdev->dev, "failed to notify netdev\n");
		goto err_notify;
	}

	dev_dbg(&pdev->dev, "probed\n");

	return 0;

err_notify:
	dpaa_netdev_stop(netdev);
	/* Waitting for on-the-fly frames finish */
	msleep(MSEC_PER_SEC * 3);
err_start:
	dpaa_netdev_phy_clean(netdev);
err_phy_init:
err_bind:
	for_each_cpu(cpu, worker_cpumask) {
		if (cpu == last) {
			break;
		}
		work_on_cpu(cpu, dpaa_netdev_unbind, netdev);
	}
	cpu = cpumask_first(worker_cpumask);
	work_on_cpu(cpu, dpaa_netdev_clean, pdev);
err:
	return rc;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 11, 0)
static int
#else
static void
#endif
dpaa_netdev_remove(struct platform_device *pdev)
{
	unsigned int cpu;
	dpaa_ndev_t *netdev;
	struct net_device *ndev;

	ndev = dev_get_drvdata(&pdev->dev);
	netdev = netdev_priv(ndev);

	sb_netdev_notify(ndev, NETDEV_GOING_DOWN, &dpaa_ndev_ops);

	sb_netdev_notify(ndev, NETDEV_DOWN, NULL);

	dpaa_netdev_stop(netdev);
	/* Waitting for on-the-fly frames finish */
	msleep(MSEC_PER_SEC * 3);

	dpaa_netdev_phy_clean(netdev);

	for_each_cpu(cpu, worker_cpumask) {
		work_on_cpu(cpu, dpaa_netdev_unbind, netdev);
	}

	cpu = cpumask_first(worker_cpumask);
	work_on_cpu(cpu, dpaa_netdev_clean, pdev);

	dev_dbg(&pdev->dev, "removed\n");

#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 11, 0)
	return 0;
#endif
}

static const struct of_device_id dpaa_netdev_of_match[] = {
	{ .compatible = "sb-dpaa-eth", },
	{},
};

static struct platform_driver dpaa_netdev_driver = {
	.driver = {
		.name = KBUILD_MODNAME "-netdev",
		.of_match_table = dpaa_netdev_of_match,
	},
	.probe = dpaa_netdev_probe,
	.remove = dpaa_netdev_remove,
};

static void dpaa_netdev_poll_dqrr(void *arg)
{
	int rc;
	u32 fqid;
	dpaa_fq_t *fq;
	unsigned int i, j, tmp;
	struct qman_swp *qm_swp;
	dpaa_fq_entry_t *e;
	const struct qm_dqrr_entry __iomem *dq;

	qm_swp = this_cpu_ptr(&cpu_dpaa_io_infos)->qm_swp;

	for (i = 0; i < poll_dqrr_budget; i++) {
		if (!(dq = qman_swp_dqrr_next(qm_swp))) {
			rc = qman_swp_dqrr_consume(qm_swp, NULL, true);
			BUG_ON(rc);
			break;
		}
		if ((dq->verb & QM_DQRR_VERB_MASK) == QM_DQRR_VERB_FRAME_DEQUEUE) {
			if ((dq->stat & QM_DQRR_STAT_FD_VALID)) {
				fqid = be32_to_cpu(dq->fqid);
				j = jhash(&fqid, sizeof(fqid), 0);
				dpaa_fq_table_for_each_entry(e, j, tmp) {
					if (atomic_read_acquire(&e->key_fqid) == (long)fqid) {
						break;
					}
				}
				BUG_ON(!e);
				fq = e->fq;
				fq->dqrr(qm_swp, fq, dq);
			}
		}
		rc = qman_swp_dqrr_consume(qm_swp, dq, ((i + 1) == poll_dqrr_budget) ? true : false);
		BUG_ON(rc);
	}
}

static int __init dpaa_init(void)
{
	int rc;
	unsigned int i, cpu;
	dpaa_fq_entry_t *e;
	struct {
		void **fn;
		const char *name;
	} ksyms_table[] = {
#define KSYM_TBL_ENTRY(x)	{(void **)&ksym_##x, #x}
		KSYM_TBL_ENTRY(qm_get_unused_portal),
		KSYM_TBL_ENTRY(qm_put_unused_portal),
		KSYM_TBL_ENTRY(bm_get_unused_portal),
		KSYM_TBL_ENTRY(bm_put_unused_portal),
		KSYM_TBL_ENTRY(FM_MAC_ResetCounters),
		KSYM_TBL_ENTRY(FM_MAC_GetStatistics),
	};

	if ((rc = mtrace_init())) {
		goto err;
	}

	dpaa_fq_table_for_each_entry(e, 0, i) {
		e->fq = NULL;
		atomic_set_release(&e->key_fqid, 0);
	}

	for (i = 0; i < ARRAY_SIZE(ksyms_table); i++) {
		if (!(*ksyms_table[i].fn = (void *)kallsyms_lookup_name(ksyms_table[i].name))) {
			pr_err("Failed to get address of \"%s\"\n", ksyms_table[i].name);
			rc = -EFAULT;
			goto err_lookup_syms;
		}
	}

	if (use_tx_conf) {
		dpaa_ndev_ops.pcpu_flow_poll = NULL;
	}

	for_each_cpu(cpu, worker_cpumask) {
		if ((rc = work_on_cpu(cpu, cpu_qman_swp_create, NULL))) {
			goto err_qman_swp_create;
 		}
	}

	for_each_cpu(cpu, worker_cpumask) {
		if ((rc = work_on_cpu(cpu, cpu_bman_swp_create, NULL))) {
			goto err_bman_swp_create;
 		}
	}

	if ((rc = sb_worker_reg_poll(KBUILD_MODNAME, dpaa_netdev_poll_dqrr, NULL))) {
		goto err_reg_poll;
	}

	if ((rc = platform_driver_register(&dpaa_netdev_driver))) {
		goto err_reg_drv;
	}

	return 0;

err_reg_drv:
	sb_worker_unreg_poll(KBUILD_MODNAME, dpaa_netdev_poll_dqrr, NULL);
err_reg_poll:
err_bman_swp_create:
	for_each_cpu(cpu, worker_cpumask) {
		work_on_cpu(cpu, cpu_bman_swp_destroy, NULL);
 	}
err_qman_swp_create:
	for_each_cpu(cpu, worker_cpumask) {
		work_on_cpu(cpu, cpu_qman_swp_destroy, NULL);
	}
err_lookup_syms:
	mtrace_finish();
err:
	return rc;
}

static void __exit dpaa_exit(void)
{
	unsigned int cpu;

	platform_driver_unregister(&dpaa_netdev_driver);

	sb_worker_unreg_poll(KBUILD_MODNAME, dpaa_netdev_poll_dqrr, NULL);

	for_each_cpu(cpu, worker_cpumask) {
		work_on_cpu(cpu, cpu_bman_swp_destroy, NULL);
 	}

	for_each_cpu(cpu, worker_cpumask) {
		work_on_cpu(cpu, cpu_qman_swp_destroy, NULL);
	}

	mtrace_finish();
}

module_init(dpaa_init);
module_exit(dpaa_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DPAA driver for simplebits");
